/* Output routines for Motorola MCore processor
   Copyright (C) 1993-2014 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "stringpool.h"
#include "calls.h"
#include "tm_p.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "builtins.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   each register number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        try_constant_tricks          (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char * output_inline_const        (enum machine_mode, rtx *);
static void       layout_mcore_frame           (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (cumulative_args_t, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate            (rtx);
static rtx_insn * emit_new_cond_insn           (rtx, int);
static rtx_insn * conditionalize_block         (rtx_insn *);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (enum machine_mode, const_tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *,
						unsigned int, tree);
#endif
static void       mcore_print_operand          (FILE *, rtx, int);
static void       mcore_print_operand_address  (FILE *, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char * mcore_strip_name_encoding  (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, int, int, int,
						int *, bool);
static void       mcore_external_libcall       (rtx);
static bool       mcore_return_in_memory       (const_tree, const_tree);
static int        mcore_arg_partial_bytes      (cumulative_args_t,
						enum machine_mode,
						tree, bool);
static rtx        mcore_function_arg           (cumulative_args_t,
						enum machine_mode,
						const_tree, bool);
static void       mcore_function_arg_advance   (cumulative_args_t,
						enum machine_mode,
						const_tree, bool);
static unsigned int mcore_function_arg_boundary (enum machine_mode,
						 const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init        (rtx, tree, rtx);
static bool       mcore_warn_func_return       (tree);
static void       mcore_option_override        (void);
static bool       mcore_legitimate_constant_p  (enum machine_mode, rtx);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "dllexport", 0, 0, true,  false, false, NULL, false },
  { "dllimport", 0, 0, true,  false, false, NULL, false },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
    false },
  { NULL,        0, 0, false, false, false, NULL, false }
};

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND		mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS	mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE		mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION	mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS		mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST		hook_int_rtx_mode_as_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE	default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES	hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE	hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES	mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG		mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE	mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY	mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE	mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT		mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p

#undef  TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack and return the number of bytes taken to do it.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
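
      /* The volatile stores of the stack pointer above touch each newly
	 allocated block, so they double as stack probes.  A sketch,
	 assuming a stack increment of 4096: SIZE == 10000 emits two
	 probed 4096-byte steps here and leaves 1808 bytes for the
	 single unprobed adjustment below.  */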
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in X to the stream.  */

static void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO (base)], INTVAL (index));
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
	  || code == '!');
}

/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost?  We do this because immediates may
   have been relaxed.  We want to ensure that CSE will eliminate the
   relaxed immediates; otherwise we get bad code (multiple reloads of the
   same constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost?  See mcore_and_cost().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
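  /* MCore provides only cmpne, cmplt and cmphs, so the remaining
     conditions (EQ, LE, GE, GTU, LTU) are synthesized below by
     inverting the sense of the test and/or reversing the operand
     order.  */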
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
	  /* Covered by btsti x,31.  */
	  && INTVAL (op1) != 0
	  && ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode,
			  cc_reg,
			  gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
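
/* For illustration: -5 is not loadable by any single instruction, but
   ~(-5) == 4 is, so with TARGET_HARDLIT the answer is trick 2 and the
   expansion is "movi rd,4" followed by "not rd".  */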

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;		/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
	return 0;	/* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (NONJUMP_INSN_P (insn))
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
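  /* Each step sums adjacent bit-fields in parallel: 1-bit fields into
     2-bit sums, then 2-bit into 4-bit, 4 into 8, and finally 8 into 16;
     the closing "& 0xff" keeps only the total, which is at most 32.  */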
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */
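/* The return value numbers the single zeroed byte in MASK from the most
   significant byte (0) down to the least significant (3), matching the
   xtrbN numbering; -1 means no single byte is clear.  */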

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */
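/* For example, a MASK of 0x5 makes the loop below emit "bseti %0,0"
   followed by "bseti %0,2".  */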

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */
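/* E.g. for the constant pair 10/11 this emits "movi %0,10" followed by
   a single conditional increment or decrement (inct/incf/dect/decf), so
   only the directly loadable constant needs a real load.  */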

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */
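/* A worked example (illustrative): for value 1000, try_constant_tricks
   picks trick 4 with X == 1024 and Y == 24, so the output below is
   "bgeni %0,10" followed by "subi %0,24".  */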

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word-sized or smaller value.  */
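/* For example (illustrative): a register-to-register move prints as
   "mov", an SImode load as "ldw", constant 5 as "movi %0,5", and a
   constant with no single-instruction form falls back to an "lrw"
   literal-pool load.  */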

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)		/* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";		/* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";		/* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case SImode:
		return "ldw\t%0,%1";
	      case HImode:
		return "ld.h\t%0,%1";
	      case QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from the literal pool.  */
	}
      else
	return "lrw\t%0, %1";			/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
	return "stw\t%1,%0";
      case HImode:
	return "st.h\t%1,%0";
      case QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
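/* For instance, in a register-pair move where srcreg + 1 == dstreg,
   moving "%0,%1" first would overwrite the second source register, so
   the "%R" halves (the next register along) are moved first instead.  */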

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source is not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0,%R1\n\tmov\t%0,%1";
	  else
	    return "mov\t%0,%1\n\tmov\t%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%R0,32";
	      else
		return "movi\t%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi\t%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni\t%R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski\t%R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski\t%0,32";
	      else
		return "movi\t%0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */
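/* A sketch of the general path below: inserting a 4-bit field at bit 8
   first ANDs the destination with ~(0xf << 8) to clear the field, masks
   the source to 4 bits, shifts it left by 8, and IORs it back in.  */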

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (SImode, operands[0],
				  gen_rtx_AND (SImode, operands[0],
					       GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (SImode, operands[0],
				  gen_rtx_IOR (SImode, operands[0],
					       GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
			      gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
			  gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (i.e., the shift will push out any high-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
			      gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
			    gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
			  gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

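/* The loop in block_move_sequence double-buffers through temp[0] and
   temp[1]: each iteration loads the next chunk while storing the one
   loaded previously, so the emitted sequence interleaves as
   ldw, ldw, stw, ldw, stw, ..., stw.  */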
static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
	max = 4*4;
      else if (bytes & 3)
	max = 8*4;
      else
	max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to the prologue that
   this is for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displacement of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */
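
/* An illustrative layout_mcore_frame result (assuming 4-byte STACK_BYTES):
   with 8 bytes of locals, two registers to save and no outbound args, the
   fast path below produces a single growth step of 16 bytes, with the
   register save area at offset 8 and locals at offset 0.  */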
8f90be4c 1683
8f90be4c 1684static void
08903e08 1685layout_mcore_frame (struct mcore_frame * infp)
8f90be4c
NC
1686{
1687 int n;
1688 unsigned int i;
1689 int nbytes;
1690 int regarg;
1691 int localregarg;
8f90be4c
NC
1692 int outbounds;
1693 unsigned int growths;
1694 int step;
1695
1696 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 1697 was passed partially in registers and partially on the stack. */
38173d38 1698 nbytes = crtl->args.pretend_args_size;
8f90be4c
NC
1699
1700 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1701 if (current_function_anonymous_args)
1702 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1703
1704 infp->arg_size = nbytes;
1705
1706 /* How much space to save non-volatile registers we stomp. */
1707 infp->reg_mask = calc_live_regs (& n);
1708 infp->reg_size = n * 4;
1709
14bc6742 1710 /* And the rest of it... locals and space for overflowed outbounds. */
8f90be4c 1711 infp->local_size = get_frame_size ();
38173d38 1712 infp->outbound_size = crtl->outgoing_args_size;
8f90be4c
NC
1713
1714 /* Make sure we have a whole number of words for the locals. */
1715 if (infp->local_size % STACK_BYTES)
1716 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1717
1718 /* Only thing we know we have to pad is the outbound space, since
1719 we've aligned our locals assuming that base of locals is aligned. */
1720 infp->pad_local = 0;
1721 infp->pad_reg = 0;
1722 infp->pad_outbound = 0;
1723 if (infp->outbound_size % STACK_BYTES)
1724 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1725
1726 /* Now we see how we want to stage the prologue so that it does
1727 the most appropriate stack growth and register saves to either:
1728 (1) run fast,
1729 (2) reduce instruction space, or
1730 (3) reduce stack space. */
b6a1cbae 1731 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
8f90be4c
NC
1732 infp->growth[i] = 0;
1733
1734 regarg = infp->reg_size + infp->arg_size;
1735 localregarg = infp->local_size + regarg;
8f90be4c
NC
1736 outbounds = infp->outbound_size + infp->pad_outbound;
1737 growths = 0;
1738
1739 /* XXX: Consider one where we consider localregarg + outbound too! */
1740
1741 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
1742 use stw's with offsets and buy the frame in one shot. */
1743 if (localregarg <= ADDI_REACH
1744 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1745 {
1746 /* Make sure we'll be aligned. */
1747 if (localregarg % STACK_BYTES)
1748 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1749
1750 step = localregarg + infp->pad_reg;
1751 infp->reg_offset = infp->local_size;
1752
1753 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1754 {
1755 step += outbounds;
1756 infp->reg_offset += outbounds;
1757 outbounds = 0;
1758 }
1759
1760 infp->arg_offset = step - 4;
1761 infp->growth[growths++] = step;
1762 infp->reg_growth = growths;
1763 infp->local_growth = growths;
1764
4816b8e4 1765 /* If we haven't already folded it in. */
8f90be4c
NC
1766 if (outbounds)
1767 infp->growth[growths++] = outbounds;
1768
1769 goto finish;
1770 }
1771
1772 /* Frame can't be done with a single subi, but can be done with 2
1773 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1774 shift some of the stack purchase into the first subi, so both are
1775 single instructions. */
1776 if (localregarg <= STORE_REACH
1777 && (infp->local_size > ADDI_REACH)
1778 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1779 {
1780 int all;
1781
1782 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1783 if (localregarg % STACK_BYTES)
1784 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1785
1786 all = localregarg + infp->pad_reg + infp->pad_local;
1787 step = ADDI_REACH; /* As much up front as we can. */
1788 if (step > all)
1789 step = all;
1790
1791 /* XXX: Consider whether step will still be aligned; we believe so. */
1792 infp->arg_offset = step - 4;
1793 infp->growth[growths++] = step;
1794 infp->reg_growth = growths;
1795 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1796 all -= step;
1797
4816b8e4 1798 /* Can we fold in any space required for outbounds? */
8f90be4c
NC
1799 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1800 {
1801 all += outbounds;
1802 outbounds = 0;
1803 }
1804
4816b8e4 1805 /* Get the rest of the locals in place. */
8f90be4c
NC
1806 step = all;
1807 infp->growth[growths++] = step;
1808 infp->local_growth = growths;
1809 all -= step;
1810
819bfe0e 1811 gcc_assert (all == 0);
8f90be4c 1812
4816b8e4 1813 /* Finish off if we need to do so. */
8f90be4c
NC
1814 if (outbounds)
1815 infp->growth[growths++] = outbounds;
1816
1817 goto finish;
1818 }
1819
1820 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1821 Then we buy the rest of the frame in 1 or 2 steps depending on
1822 whether we need a frame pointer. */
1823 if ((regarg % STACK_BYTES) == 0)
1824 {
1825 infp->growth[growths++] = regarg;
1826 infp->reg_growth = growths;
1827 infp->arg_offset = regarg - 4;
1828 infp->reg_offset = 0;
1829
1830 if (infp->local_size % STACK_BYTES)
1831 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1832
1833 step = infp->local_size + infp->pad_local;
1834
1835 if (!frame_pointer_needed)
1836 {
1837 step += outbounds;
1838 outbounds = 0;
1839 }
1840
1841 infp->growth[growths++] = step;
1842 infp->local_growth = growths;
1843
4816b8e4 1844 /* If there's any left to be done. */
8f90be4c
NC
1845 if (outbounds)
1846 infp->growth[growths++] = outbounds;
1847
1848 goto finish;
1849 }
1850
1851 /* XXX: optimizations that we'll want to play with....
1852 -- regarg is not aligned, but it's a small number of registers;
1853 use some of localsize so that regarg is aligned and then
1854 save the registers. */
1855
1856 /* Simple encoding; plods down the stack buying the pieces as it goes.
1857 -- does not optimize space consumption.
1858 -- does not attempt to optimize instruction counts.
1859 -- but it is safe for all alignments. */
1860 if (regarg % STACK_BYTES != 0)
1861 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1862
1863 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1864 infp->reg_growth = growths;
1865 infp->arg_offset = infp->growth[0] - 4;
1866 infp->reg_offset = 0;
1867
1868 if (frame_pointer_needed)
1869 {
1870 if (infp->local_size % STACK_BYTES != 0)
1871 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1872
1873 infp->growth[growths++] = infp->local_size + infp->pad_local;
1874 infp->local_growth = growths;
1875
1876 infp->growth[growths++] = outbounds;
1877 }
1878 else
1879 {
1880 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1881 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1882
1883 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1884 infp->local_growth = growths;
1885 }
1886
1887 /* Anything else that we've forgotten? Plus a few consistency checks. */
1888 finish:
1889 gcc_assert (infp->reg_offset >= 0);
1890 gcc_assert (growths <= MAX_STACK_GROWS);
1891
1892 for (i = 0; i < growths; i++)
1893 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1894}
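/* Illustrative sketch (not part of the port): the padding arithmetic used
   throughout layout_mcore_frame, assuming the 8-byte stack alignment that
   the comments in this file mention (the real STACK_BYTES value comes from
   mcore.h).  Compiled stand-alone it shows how a hypothetical 14-byte
   register+locals block is padded and bought in one step, which the
   asserts above then re-check. */
#if 0
#include <stdio.h>

#define STACK_BYTES 8   /* assumed alignment, see lead-in comment */

static int
pad_to_stack_bytes (int size)
{
  /* Same shape as the pad_reg/pad_local computations above. */
  return (size % STACK_BYTES) ? STACK_BYTES - (size % STACK_BYTES) : 0;
}

int
main (void)
{
  int localregarg = 14;  /* hypothetical arg_size + reg_size + local_size */
  int pad = pad_to_stack_bytes (localregarg);

  printf ("pad = %d, first growth step = %d\n", pad, localregarg + pad);
  /* prints: pad = 2, first growth step = 16 */
  return 0;
}
#endif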
1895
1896/* Define the offset between two registers, one to be eliminated, and
1897 the other its replacement, at the start of a routine. */
1898
1899 int
1900 mcore_initial_elimination_offset (int from, int to)
1901{
1902 int above_frame;
1903 int below_frame;
1904 struct mcore_frame fi;
1905
1906 layout_mcore_frame (& fi);
1907
1908 /* fp to ap */
1909 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1910 /* sp to fp */
1911 below_frame = fi.outbound_size + fi.pad_outbound;
1912
1913 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1914 return above_frame;
1915
1916 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1917 return above_frame + below_frame;
1918
1919 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1920 return below_frame;
1921
1922 gcc_unreachable ();
1923}
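/* A worked example with hypothetical sizes: local_size = 16, pad_local = 0,
   reg_size = 8, pad_reg = 0, outbound_size = 24, pad_outbound = 0.  Then
   ap->fp is 16 + 0 + 8 + 0 = 24, fp->sp is 24 + 0 = 24, and ap->sp is
   their sum, 48. */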
1924
1925/* Keep track of some information about varargs for the prolog. */
1926
1927 static void
1928 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1929 enum machine_mode mode, tree type,
1930 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1931 int second_time ATTRIBUTE_UNUSED)
1932 {
1933 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1934
1935 current_function_anonymous_args = 1;
1936
1937 /* We need to know how many argument registers are used before
1938 the varargs start, so that we can push the remaining argument
1939 registers during the prologue. */
09a2b93a 1940 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
8f90be4c 1941
1942 /* There is a bug somewhere in the arg handling code.
1943 Until I can find it this workaround always pushes the
1944 last named argument onto the stack. */
1945 number_of_regs_before_varargs = *args_so_far;
1946
1947 /* The last named argument may be split between argument registers
1948 and the stack. Allow for this here. */
1949 if (number_of_regs_before_varargs > NPARM_REGS)
1950 number_of_regs_before_varargs = NPARM_REGS;
1951}
1952
1953void
1954 mcore_expand_prolog (void)
1955{
1956 struct mcore_frame fi;
1957 int space_allocated = 0;
1958 int growth = 0;
1959
1960 /* Find out what we're doing. */
1961 layout_mcore_frame (&fi);
1962
1963 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1964 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1965
1966 if (TARGET_CG_DATA)
1967 {
1968 /* Emit a symbol for this routine's frame size. */
1969 rtx x;
1970
1971 x = DECL_RTL (current_function_decl);
1972
1973 gcc_assert (GET_CODE (x) == MEM);
1974
1975 x = XEXP (x, 0);
1976
6e1f65b5 1977 gcc_assert (GET_CODE (x) == SYMBOL_REF);
8f90be4c 1978
04695783 1979 free (mcore_current_function_name);
8f90be4c 1980
1dcd444b 1981 mcore_current_function_name = xstrdup (XSTR (x, 0));
1982
1983 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1984
e3b5732b 1985 if (cfun->calls_alloca)
1986 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1987
1988 /* 970425: RBE:
1989 We're looking at how the 8byte alignment affects stack layout
1990 and where we had to pad things. This emits information we can
1991 extract which tells us about frame sizes and the like. */
1992 fprintf (asm_out_file,
1993 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1994 mcore_current_function_name,
1995 fi.arg_size, fi.reg_size, fi.reg_mask,
1996 fi.local_size, fi.outbound_size,
1997 frame_pointer_needed);
1998 }
1999
2000 if (mcore_naked_function_p ())
2001 return;
2002
2003 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
08903e08 2004 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2005
2006 /* If we have a parameter passed partially in regs and partially in memory,
2007 the registers will have been stored to memory already in function.c. So
2008 we only need to do something here for varargs functions. */
38173d38 2009 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2010 {
2011 int offset;
2012 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2013 int remaining = fi.arg_size;
2014
2015 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2016 {
2017 emit_insn (gen_movsi
2018 (gen_rtx_MEM (SImode,
2019 plus_constant (Pmode, stack_pointer_rtx,
2020 offset)),
2021 gen_rtx_REG (SImode, rn)));
2022 }
2023 }
2024
4816b8e4 2025 /* Do we need another stack adjustment before we do the register saves? */
8f90be4c 2026 if (growth < fi.reg_growth)
08903e08 2027 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2028
2029 if (fi.reg_size != 0)
2030 {
2031 int i;
2032 int offs = fi.reg_offset;
2033
2034 for (i = 15; i >= 0; i--)
2035 {
2036 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2037 {
2038 int first_reg = 15;
2039
2040 while (fi.reg_mask & (1 << first_reg))
2041 first_reg--;
2042 first_reg++;
2043
f1c25d3b
KH
2044 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2045 gen_rtx_REG (SImode, first_reg),
8f90be4c
NC
2046 GEN_INT (16 - first_reg)));
2047
2048 i -= (15 - first_reg);
2049 offs += (16 - first_reg) * 4;
2050 }
2051 else if (fi.reg_mask & (1 << i))
2052 {
2053 emit_insn (gen_movsi
2054 (gen_rtx_MEM (SImode,
2055 plus_constant (Pmode, stack_pointer_rtx,
2056 offs)),
2057 gen_rtx_REG (SImode, i)));
2058 offs += 4;
2059 }
2060 }
2061 }
2062
2063 /* Figure the locals + outbounds. */
2064 if (frame_pointer_needed)
2065 {
2066 /* If we haven't already purchased to 'fp'. */
2067 if (growth < fi.local_growth)
08903e08 2068 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2069
2070 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2071
4816b8e4 2072 /* ... and then go any remaining distance for outbounds, etc. */
2073 if (fi.growth[growth])
2074 output_stack_adjust (-1, fi.growth[growth++]);
2075 }
2076 else
2077 {
2078 if (growth < fi.local_growth)
08903e08 2079 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2080 if (fi.growth[growth])
2081 output_stack_adjust (-1, fi.growth[growth++]);
2082 }
2083}
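/* Illustrative sketch (not part of the port): how the save loop above
   decides between one stm and individual stw's for a given reg_mask.  The
   real loop additionally requires the register block to start at offset 0
   (offs == 0) before it will use stm. */
#if 0
#include <stdio.h>

static void
plan_reg_saves (unsigned int reg_mask)
{
  int i;

  for (i = 15; i >= 0; i--)
    {
      if (i == 15 && (reg_mask & 0xc000) == 0xc000)
        {
          /* r14 and r15 are both saved: find the contiguous run that
             ends at r15 and store it with a single stm. */
          int first_reg = 15;

          while (reg_mask & (1 << first_reg))
            first_reg--;
          first_reg++;

          printf ("stm r%d-r15\n", first_reg);
          i -= (15 - first_reg);
        }
      else if (reg_mask & (1 << i))
        printf ("stw r%d\n", i);
    }
}

int
main (void)
{
  plan_reg_saves (0xc004);  /* r2, r14, r15 -> "stm r14-r15", "stw r2" */
  return 0;
}
#endif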
2084
2085void
08903e08 2086mcore_expand_epilog (void)
2087{
2088 struct mcore_frame fi;
2089 int i;
2090 int offs;
2091 int growth = MAX_STACK_GROWS - 1;
2092
2093
2094 /* Find out what we're doing. */
2095 layout_mcore_frame (&fi);
2096
2097 if (mcore_naked_function_p ())
2098 return;
2099
2100 /* If we had a frame pointer, restore the sp from that. */
2101 if (frame_pointer_needed)
2102 {
2103 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2104 growth = fi.local_growth - 1;
2105 }
2106 else
2107 {
2108 /* XXX: The while loop should accumulate and do a single sell. */
2109 while (growth >= fi.local_growth)
2110 {
2111 if (fi.growth[growth] != 0)
2112 output_stack_adjust (1, fi.growth[growth]);
2113 growth--;
2114 }
2115 }
2116
2117 /* Make sure we've shrunk stack back to the point where the registers
2118 were laid down. This is typically 0/1 iterations. Then pull the
4816b8e4 2119 register save information back off the stack. */
2120 while (growth >= fi.reg_growth)
2121 output_stack_adjust ( 1, fi.growth[growth--]);
2122
2123 offs = fi.reg_offset;
2124
2125 for (i = 15; i >= 0; i--)
2126 {
2127 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2128 {
2129 int first_reg;
2130
2131 /* Find the starting register. */
2132 first_reg = 15;
2133
2134 while (fi.reg_mask & (1 << first_reg))
2135 first_reg--;
2136
2137 first_reg++;
2138
2139 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2140 gen_rtx_MEM (SImode, stack_pointer_rtx),
2141 GEN_INT (16 - first_reg)));
2142
2143 i -= (15 - first_reg);
2144 offs += (16 - first_reg) * 4;
2145 }
2146 else if (fi.reg_mask & (1 << i))
2147 {
2148 emit_insn (gen_movsi
2149 (gen_rtx_REG (SImode, i),
2150 gen_rtx_MEM (SImode,
2151 plus_constant (Pmode, stack_pointer_rtx,
2152 offs))));
2153 offs += 4;
2154 }
2155 }
2156
2157 /* Give back anything else. */
dab66575 2158 /* XXX: Should accumulate total and then give it back. */
2159 while (growth >= 0)
2160 output_stack_adjust ( 1, fi.growth[growth--]);
2161}
2162\f
2163/* This code is borrowed from the SH port. */
2164
2165/* The MCORE cannot load a large constant into a register, constants have to
2166 come from a pc relative load. The reference of a pc relative load
0fa2e4df 2167 instruction must be less than 1k in front of the instruction. This
2168 means that we often have to dump a constant inside a function, and
2169 generate code to branch around it.
2170
2171 It is important to minimize this, since the branches will slow things
2172 down and make things bigger.
2173
2174 Worst case code looks like:
2175
2176 lrw L1,r0
2177 br L2
2178 align
2179 L1: .long value
2180 L2:
2181 ..
2182
2183 lrw L3,r0
2184 br L4
2185 align
2186 L3: .long value
2187 L4:
2188 ..
2189
2190 We fix this by performing a scan before scheduling, which notices which
2191 instructions need to have their operands fetched from the constant table
2192 and builds the table.
2193
2194 The algorithm is:
2195
2196 scan, find an instruction which needs a pcrel move. Look forward, find the
2197 last barrier which is within MAX_COUNT bytes of the requirement.
2198 If there isn't one, make one. Process all the instructions between
2199 the find and the barrier.
2200
2201 In the above example, we can tell that L3 is within 1k of L1, so
2202 the first move can be shrunk from the 2 insn+constant sequence into
2203 just 1 insn, and the constant moved to L3 to make:
2204
2205 lrw L1,r0
2206 ..
2207 lrw L3,r0
2208 bra L4
2209 align
2210 L3:.long value
2211 L4:.long value
2212
2213 Then the second move becomes the target for the shortening process. */
2214
2215typedef struct
2216{
2217 rtx value; /* Value in table. */
2218 rtx label; /* Label of value. */
2219} pool_node;
2220
2221/* The maximum number of constants that can fit into one pool, since
2222 the pc relative range is 0...1020 bytes and constants are at least 4
2a43945f 2223 bytes long. We subtract 4 from the range to allow for the case where
2224 we need to add a branch/align before the constant pool. */
2225
2226#define MAX_COUNT 1016
2227#define MAX_POOL_SIZE (MAX_COUNT/4)
2228static pool_node pool_vector[MAX_POOL_SIZE];
2229static int pool_size;
2230
2231/* Dump out any constants accumulated in the final pass. These
2232 will only be labels. */
4816b8e4 2233
f27cd94d 2234const char *
08903e08 2235mcore_output_jump_label_table (void)
8f90be4c
NC
2236{
2237 int i;
2238
2239 if (pool_size)
2240 {
2241 fprintf (asm_out_file, "\t.align 2\n");
2242
2243 for (i = 0; i < pool_size; i++)
2244 {
2245 pool_node * p = pool_vector + i;
2246
4977bab6 2247 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2248
2249 output_asm_insn (".long %0", &p->value);
2250 }
2251
2252 pool_size = 0;
2253 }
2254
2255 return "";
2256}
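/* For a pool holding two entries the loop above would emit something like
   the following (hypothetical label numbers and values; the exact internal
   label syntax depends on the target's ASM_OUTPUT_INTERNAL_LABEL):

        .align 2
   .L42:
        .long 0x12345678
   .L43:
        .long some_symbol
*/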
2257
8f90be4c 2258/* Check whether insn is a candidate for a conditional. */
4816b8e4 2259
8f90be4c 2260static cond_type
08903e08 2261is_cond_candidate (rtx insn)
2262{
2263 /* The only things we conditionalize are those that can be directly
2264 changed into a conditional. Only bother with SImode items. If
2265 we wanted to be a little more aggressive, we could also do other
4816b8e4 2266 modes such as DImode with reg-reg move or load 0. */
b64925dc 2267 if (NONJUMP_INSN_P (insn))
2268 {
2269 rtx pat = PATTERN (insn);
2270 rtx src, dst;
2271
2272 if (GET_CODE (pat) != SET)
2273 return COND_NO;
2274
2275 dst = XEXP (pat, 0);
2276
2277 if ((GET_CODE (dst) != REG &&
2278 GET_CODE (dst) != SUBREG) ||
2279 GET_MODE (dst) != SImode)
2280 return COND_NO;
2281
2282 src = XEXP (pat, 1);
2283
2284 if ((GET_CODE (src) == REG ||
2285 (GET_CODE (src) == SUBREG &&
2286 GET_CODE (SUBREG_REG (src)) == REG)) &&
2287 GET_MODE (src) == SImode)
2288 return COND_MOV_INSN;
2289 else if (GET_CODE (src) == CONST_INT &&
2290 INTVAL (src) == 0)
2291 return COND_CLR_INSN;
2292 else if (GET_CODE (src) == PLUS &&
2293 (GET_CODE (XEXP (src, 0)) == REG ||
2294 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2295 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2296 GET_MODE (XEXP (src, 0)) == SImode &&
2297 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2298 INTVAL (XEXP (src, 1)) == 1)
2299 return COND_INC_INSN;
2300 else if (((GET_CODE (src) == MINUS &&
2301 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2302 INTVAL( XEXP (src, 1)) == 1) ||
2303 (GET_CODE (src) == PLUS &&
2304 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2305 INTVAL (XEXP (src, 1)) == -1)) &&
2306 (GET_CODE (XEXP (src, 0)) == REG ||
2307 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2308 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2309 GET_MODE (XEXP (src, 0)) == SImode)
2310 return COND_DEC_INSN;
2311
14bc6742 2312 /* Some insns that we don't bother with:
2313 (set (rx:DI) (ry:DI))
2314 (set (rx:DI) (const_int 0))
2315 */
2316
2317 }
2318 else if (JUMP_P (insn)
2319 && GET_CODE (PATTERN (insn)) == SET
2320 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2321 return COND_BRANCH_INSN;
2322
2323 return COND_NO;
2324}
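/* As a sketch, each classification is later turned into a conditional
   form by emit_new_cond_insn below (via the gen_movt0, gen_incscc,
   gen_decscc and gen_branch_true/false patterns; presumably the MCore
   movt/movf, inct/incf, dect/decf and bt/bf instructions):

     mov rX,rY  -> COND_MOV_INSN    -> conditional reg-reg move
     rX = 0     -> COND_CLR_INSN    -> conditional clear
     rX += 1    -> COND_INC_INSN    -> conditional increment
     rX -= 1    -> COND_DEC_INSN    -> conditional decrement
     br label   -> COND_BRANCH_INSN -> conditional branch
*/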
2325
2326/* Emit a conditional version of insn and replace the old insn with the
2327 new one. Return the new insn if emitted. */
4816b8e4 2328
b32d5189 2329static rtx_insn *
08903e08 2330emit_new_cond_insn (rtx insn, int cond)
2331{
2332 rtx c_insn = 0;
2333 rtx pat, dst, src;
2334 cond_type num;
2335
2336 if ((num = is_cond_candidate (insn)) == COND_NO)
2337 return NULL;
2338
2339 pat = PATTERN (insn);
2340
b64925dc 2341 if (NONJUMP_INSN_P (insn))
2342 {
2343 dst = SET_DEST (pat);
2344 src = SET_SRC (pat);
2345 }
2346 else
2347 {
2348 dst = JUMP_LABEL (insn);
2349 src = NULL_RTX;
2350 }
2351
2352 switch (num)
2353 {
2354 case COND_MOV_INSN:
2355 case COND_CLR_INSN:
2356 if (cond)
2357 c_insn = gen_movt0 (dst, src, dst);
2358 else
2359 c_insn = gen_movt0 (dst, dst, src);
2360 break;
2361
2362 case COND_INC_INSN:
2363 if (cond)
2364 c_insn = gen_incscc (dst, dst);
2365 else
2366 c_insn = gen_incscc_false (dst, dst);
2367 break;
2368
2369 case COND_DEC_INSN:
2370 if (cond)
2371 c_insn = gen_decscc (dst, dst);
2372 else
2373 c_insn = gen_decscc_false (dst, dst);
2374 break;
2375
2376 case COND_BRANCH_INSN:
2377 if (cond)
2378 c_insn = gen_branch_true (dst);
2379 else
2380 c_insn = gen_branch_false (dst);
2381 break;
2382
2383 default:
2384 return NULL;
2385 }
2386
2387 /* Only copy the notes if they exist. */
2388 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2389 {
2390 /* We really don't need to bother with the notes and links at this
2391 point, but go ahead and save the notes. This will help is_dead()
2392 when applying peepholes (links don't matter since they are not
2393 used any more beyond this point for the mcore). */
2394 REG_NOTES (c_insn) = REG_NOTES (insn);
2395 }
2396
2397 if (num == COND_BRANCH_INSN)
2398 {
2399 /* For jumps, we need to be a little bit careful and emit the new jump
2400 before the old one and to update the use count for the target label.
2401 This way, the barrier following the old (uncond) jump will get
2402 deleted, but the label won't. */
2403 c_insn = emit_jump_insn_before (c_insn, insn);
2404
2405 ++ LABEL_NUSES (dst);
2406
2407 JUMP_LABEL (c_insn) = dst;
2408 }
2409 else
2410 c_insn = emit_insn_after (c_insn, insn);
2411
2412 delete_insn (insn);
2413
b32d5189 2414 return as_a <rtx_insn *> (c_insn);
2415}
2416
2417/* Attempt to change a basic block into a series of conditional insns. This
2418 works by taking the branch at the end of the 1st block and scanning for the
2419 end of the 2nd block. If all instructions in the 2nd block have cond.
2420 versions and the label at the start of block 3 is the same as the target
2421 from the branch at block 1, then conditionalize all insns in block 2 using
2422 the inverse condition of the branch at block 1. (Note I'm bending the
2423 definition of basic block here.)
2424
2425 e.g., change:
2426
2427 bt L2 <-- end of block 1 (delete)
2428 mov r7,r8
2429 addu r7,1
2430 br L3 <-- end of block 2
2431
2432 L2: ... <-- start of block 3 (NUSES==1)
2433 L3: ...
2434
2435 to:
2436
2437 movf r7,r8
2438 incf r7
2439 bf L3
2440
2441 L3: ...
2442
2443 we can delete the L2 label if NUSES==1 and re-apply the optimization
2444 starting at the last instruction of block 2. This may allow an entire
4816b8e4 2445 if-then-else statement to be conditionalized. BRC */
2446static rtx_insn *
2447conditionalize_block (rtx_insn *first)
8f90be4c 2448{
b32d5189 2449 rtx_insn *insn;
8f90be4c 2450 rtx br_pat;
2451 rtx_insn *end_blk_1_br = 0;
2452 rtx_insn *end_blk_2_insn = 0;
2453 rtx_insn *start_blk_3_lab = 0;
2454 int cond;
2455 int br_lab_num;
2456 int blk_size = 0;
2457
2458
2459 /* Check that the first insn is a candidate conditional jump. This is
2460 the one that we'll eliminate. If not, advance to the next insn to
2461 try. */
2462 if (! JUMP_P (first)
2463 || GET_CODE (PATTERN (first)) != SET
2464 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2465 return NEXT_INSN (first);
2466
2467 /* Extract some information we need. */
2468 end_blk_1_br = first;
2469 br_pat = PATTERN (end_blk_1_br);
2470
2471 /* Complement the condition since we use the reverse cond. for the insns. */
2472 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2473
2474 /* Determine what kind of branch we have. */
2475 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2476 {
2477 /* A normal branch, so extract label out of first arm. */
2478 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2479 }
2480 else
2481 {
2482 /* An inverse branch, so extract the label out of the 2nd arm
2483 and complement the condition. */
2484 cond = (cond == 0);
2485 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2486 }
2487
2488 /* Scan forward for the start of block 2: it must start with a
2489 label and that label must be the same as the branch target
2490 label from block 1. We don't care about whether block 2 actually
2491 ends with a branch or a label (an uncond. branch is
2492 conditionalizable). */
2493 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2494 {
2495 enum rtx_code code;
2496
2497 code = GET_CODE (insn);
2498
14bc6742 2499 /* Look for the label at the start of block 3. */
2500 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2501 break;
2502
2503 /* Skip barriers, notes, and conditionalizable insns. If the
2504 insn is not conditionalizable or makes this optimization fail,
2505 just return the next insn so we can start over from that point. */
2506 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2507 return NEXT_INSN (insn);
2508
112cdef5 2509 /* Remember the last real insn before the label (i.e. end of block 2). */
2510 if (code == JUMP_INSN || code == INSN)
2511 {
2512 blk_size ++;
2513 end_blk_2_insn = insn;
2514 }
2515 }
2516
2517 if (!insn)
2518 return insn;
2519
2520 /* It is possible for this optimization to slow performance if the blocks
2521 are long. This really depends upon whether the branch is likely taken
2522 or not. If the branch is taken, we slow performance in many cases. But,
2523 if the branch is not taken, we always help performance (for a single
2524 block, but for a double block (i.e. when the optimization is re-applied)
2525 this is not true since the 'right thing' depends on the overall length of
2526 the collapsed block). As a compromise, don't apply this optimization on
2527 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2528 the best threshold depends on the latencies of the instructions (i.e.,
2529 the branch penalty). */
2530 if (optimize > 1 && blk_size > 2)
2531 return insn;
2532
2533 /* At this point, we've found the start of block 3 and we know that
2534 it is the destination of the branch from block 1. Also, all
2535 instructions in the block 2 are conditionalizable. So, apply the
2536 conditionalization and delete the branch. */
2537 start_blk_3_lab = insn;
2538
2539 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2540 insn = NEXT_INSN (insn))
2541 {
b32d5189 2542 rtx_insn *newinsn;
8f90be4c 2543
4654c0cf 2544 if (insn->deleted ())
2545 continue;
2546
14bc6742 2547 /* Try to form a conditional variant of the instruction and emit it. */
2548 if ((newinsn = emit_new_cond_insn (insn, cond)))
2549 {
2550 if (end_blk_2_insn == insn)
2551 end_blk_2_insn = newinsn;
2552
2553 insn = newinsn;
2554 }
2555 }
2556
2557 /* Note whether we will delete the label starting blk 3 when the jump
2558 gets deleted. If so, we want to re-apply this optimization at the
2559 last real instruction right before the label. */
2560 if (LABEL_NUSES (start_blk_3_lab) == 1)
2561 {
2562 start_blk_3_lab = 0;
2563 }
2564
2565 /* ??? We probably should redistribute the death notes for this insn, esp.
2566 the death of cc, but it doesn't really matter this late in the game.
2567 The peepholes all use is_dead() which will find the correct death
2568 regardless of whether there is a note. */
2569 delete_insn (end_blk_1_br);
2570
2571 if (! start_blk_3_lab)
2572 return end_blk_2_insn;
2573
4816b8e4 2574 /* Return the insn right after the label at the start of block 3. */
2575 return NEXT_INSN (start_blk_3_lab);
2576}
2577
2578/* Apply the conditionalization of blocks optimization. This is the
2579 outer loop that traverses through the insns scanning for a branch
2580 that signifies an opportunity to apply the optimization. Note that
2581 this optimization is applied late. If we could apply it earlier,
2582 say before cse 2, it may expose more optimization opportunities.
2583 But the payback probably isn't really worth the effort (we'd have
2584 to update all reg/flow/notes/links/etc to make it work - and stick it
2585 in before cse 2). */
2586
8f90be4c 2587static void
08903e08 2588conditionalize_optimization (void)
8f90be4c 2589{
b32d5189 2590 rtx_insn *insn;
8f90be4c 2591
18dbd950 2592 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2593 continue;
2594}
2595
18dbd950 2596/* This is to handle loads from the constant pool. */
4816b8e4 2597
18dbd950 2598static void
08903e08 2599mcore_reorg (void)
2600{
2601 /* Reset this variable. */
2602 current_function_anonymous_args = 0;
2603
2604 if (optimize == 0)
2605 return;
2606
2607 /* Conditionalize blocks where we can. */
18dbd950 2608 conditionalize_optimization ();
2609
2610 /* Literal pool generation is now pushed off until the assembler. */
2611}
2612
2613\f
f0f4da32 2614/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2615
f0f4da32 2616bool
08903e08 2617mcore_r15_operand_p (rtx x)
2618{
2619 switch (GET_CODE (x))
2620 {
2621 case CONST_INT:
2622 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2623
2624 case REG:
2625 case SUBREG:
2626 case MEM:
2627 return 1;
2628
2629 default:
2630 return 0;
2631 }
2632}
2633
0a2aaacc 2634/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2635 directly move X into it, use r1-r14 as a temporary. */
08903e08 2636
f0f4da32 2637enum reg_class
0a2aaacc 2638mcore_secondary_reload_class (enum reg_class rclass,
08903e08 2639 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2640{
0a2aaacc 2641 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2642 && !mcore_r15_operand_p (x))
2643 return LRW_REGS;
2644 return NO_REGS;
2645}
8f90be4c 2646
f0f4da32 2647/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2648 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2649 use LRW_REGS instead. */
08903e08 2650
8f90be4c 2651enum reg_class
0a2aaacc 2652mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2653{
0a2aaacc 2654 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2655 return LRW_REGS;
8f90be4c 2656
0a2aaacc 2657 return rclass;
2658}
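/* Hypothetical example: to set r15 to a constant that
   mcore_const_ok_for_inline rejects, reload first lands it in an LRW_REGS
   register and then copies it across, along the lines of:

        lrw r7, [literal]   ; r7 is the LRW_REGS temporary
        mov r15, r7
*/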
2659
2660/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2661 register. Note that the current version doesn't worry about whether
2662 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2663 in r2 matches an SImode in r2). Might think in the future about whether
2664 we want to be able to say something about modes. */
08903e08 2665
8f90be4c 2666int
08903e08 2667mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2668{
14bc6742 2669 /* Strip any and all of the subreg wrappers. */
2670 while (GET_CODE (x) == SUBREG)
2671 x = SUBREG_REG (x);
2672
2673 while (GET_CODE (y) == SUBREG)
2674 y = SUBREG_REG (y);
2675
2676 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2677 return 1;
2678
2679 return 0;
2680}
2681
2682 static void
2683 mcore_option_override (void)
2684 {
2685 /* Only the m340 supports little endian code. */
2686 if (TARGET_LITTLE_END && ! TARGET_M340)
78fb8038 2687 target_flags |= MASK_M340;
8f90be4c 2688}
fac0f722 2689
8f90be4c 2690\f
2691/* Compute the number of word sized registers needed to
2692 hold a function argument of mode MODE and type TYPE. */
08903e08 2693
8f90be4c 2694int
586de218 2695mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2696{
2697 int size;
2698
fe984136 2699 if (targetm.calls.must_pass_in_stack (mode, type))
2700 return 0;
2701
2702 if (type && mode == BLKmode)
2703 size = int_size_in_bytes (type);
2704 else
2705 size = GET_MODE_SIZE (mode);
2706
2707 return ROUND_ADVANCE (size);
2708}
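/* For example, assuming ROUND_ADVANCE rounds a byte count up to whole
   argument words (UNITS_PER_WORD == 4 here), a 6-byte BLKmode structure
   needs ROUND_ADVANCE (6) == 2 argument registers while an SImode value
   needs just 1. */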
2709
2710static rtx
586de218 2711handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2712{
2713 int size;
2714
696e78bf 2715 /* The MCore ABI defines that a structure whose size is not a whole multiple
2716 of bytes is passed packed into registers (or spilled onto the stack if
2717 not enough registers are available) with the last few bytes of the
2718 structure being packed, left-justified, into the last register/stack slot.
2719 GCC handles this correctly if the last word is in a stack slot, but we
2720 have to generate a special, PARALLEL RTX if the last word is in an
2721 argument register. */
2722 if (type
2723 && TYPE_MODE (type) == BLKmode
2724 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2725 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2726 && (size % UNITS_PER_WORD != 0)
2727 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2728 {
2729 rtx arg_regs [NPARM_REGS];
2730 int nregs;
2731 rtx result;
2732 rtvec rtvec;
2733
2734 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2735 {
2736 arg_regs [nregs] =
2737 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2738 GEN_INT (nregs * UNITS_PER_WORD));
2739 nregs ++;
2740 }
2741
2742 /* We assume here that NPARM_REGS == 6. The assert checks this. */
819bfe0e 2743 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2744 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2745 arg_regs[3], arg_regs[4], arg_regs[5]);
2746
2747 result = gen_rtx_PARALLEL (mode, rtvec);
2748 return result;
2749 }
2750
2751 return gen_rtx_REG (mode, reg);
2752}
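/* Sketch of the PARALLEL built above for a hypothetical 10-byte structure
   whose first word lands in hardware register r2 (reg == 2): three SImode
   pieces at offsets 0, 4 and 8,

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))
                (expr_list (reg:SI 4) (const_int 8))])

   with the final two bytes travelling left-justified in r4, matching the
   ABI rule described above. */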
2753
2754rtx
cde0f3fd 2755mcore_function_value (const_tree valtype, const_tree func)
2756{
2757 enum machine_mode mode;
2758 int unsigned_p;
2759
2760 mode = TYPE_MODE (valtype);
2761
cde0f3fd 2762 /* Since we promote return types, we must promote the mode here too. */
71e0af3c 2763 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2764
2765 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2766}
2767
2768/* Define where to put the arguments to a function.
2769 Value is zero to push the argument on the stack,
2770 or a hard register in which to store the argument.
2771
2772 MODE is the argument's machine mode.
2773 TYPE is the data type of the argument (as a tree).
2774 This is null for libcalls where that information may
2775 not be available.
2776 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2777 the preceding args and about the function being called.
2778 NAMED is nonzero if this argument is a named parameter
2779 (otherwise it is an extra parameter matching an ellipsis).
2780
2781 On MCore the first args are normally in registers
2782 and the rest are pushed. Any arg that starts within the first
2783 NPARM_REGS words is at least partially passed in a register unless
2784 its data type forbids. */
08903e08 2785
4665ac17 2786static rtx
d5cc9181 2787mcore_function_arg (cumulative_args_t cum, enum machine_mode mode,
4665ac17 2788 const_tree type, bool named)
2789{
2790 int arg_reg;
2791
88042663 2792 if (! named || mode == VOIDmode)
2793 return 0;
2794
fe984136 2795 if (targetm.calls.must_pass_in_stack (mode, type))
2796 return 0;
2797
d5cc9181 2798 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2799
2800 if (arg_reg < NPARM_REGS)
2801 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2802
2803 return 0;
2804}
2805
2806 static void
2807 mcore_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
2808 const_tree type, bool named ATTRIBUTE_UNUSED)
2809 {
2810 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2811
2812 *cum = (ROUND_REG (*cum, mode)
2813 + (int)named * mcore_num_arg_regs (mode, type));
2814 }
2815
2816static unsigned int
2817mcore_function_arg_boundary (enum machine_mode mode,
2818 const_tree type ATTRIBUTE_UNUSED)
2819{
2820 /* Doubles must be aligned to an 8 byte boundary. */
2821 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2822 ? BIGGEST_ALIGNMENT
2823 : PARM_BOUNDARY);
2824}
2825
2826/* Returns the number of bytes of argument registers required to hold *part*
2827 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dab66575 2828 the type is not known). If the argument fits entirely in the argument
2829 registers, or entirely on the stack, then 0 is returned. CUM is the
2830 number of argument registers already used by earlier parameters to
2831 the function. */
08903e08 2832
78a52f11 2833static int
d5cc9181 2834mcore_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
78a52f11 2835 tree type, bool named)
8f90be4c 2836{
d5cc9181 2837 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2838
2839 if (named == 0)
2840 return 0;
2841
fe984136 2842 if (targetm.calls.must_pass_in_stack (mode, type))
2843 return 0;
2844
2845 /* REG is not the *hardware* register number of the register that holds
2846 the argument, it is the *argument* register number. So for example,
2847 the first argument to a function goes in argument register 0, which
2848 translates (for the MCore) into hardware register 2. The second
2849 argument goes into argument register 1, which translates into hardware
2850 register 3, and so on. NPARM_REGS is the number of argument registers
2851 supported by the target, not the maximum hardware register number of
2852 the target. */
2853 if (reg >= NPARM_REGS)
2854 return 0;
2855
2856 /* If the argument fits entirely in registers, return 0. */
2857 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2858 return 0;
2859
2860 /* The argument overflows the number of available argument registers.
2861 Compute how many argument registers have not yet been assigned to
2862 hold an argument. */
2863 reg = NPARM_REGS - reg;
2864
2865 /* Return partially in registers and partially on the stack. */
78a52f11 2866 return reg * UNITS_PER_WORD;
2867}
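/* Worked example: with NPARM_REGS == 6, a 3-word argument whose first word
   falls in argument register 4 overflows the registers (4 + 3 > 6), so
   6 - 4 = 2 registers hold the first two words and the function returns
   2 * UNITS_PER_WORD == 8 bytes; the last word goes on the stack. */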
2868\f
a0ab749a 2869/* Return nonzero if SYMBOL is marked as being dllexport'd. */
08903e08 2870
8f90be4c 2871int
08903e08 2872mcore_dllexport_name_p (const char * symbol)
2873{
2874 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2875}
2876
a0ab749a 2877/* Return nonzero if SYMBOL is marked as being dllimport'd. */
08903e08 2878
8f90be4c 2879int
08903e08 2880mcore_dllimport_name_p (const char * symbol)
2881{
2882 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2883}
2884
2885/* Mark a DECL as being dllexport'd. */
08903e08 2886
8f90be4c 2887static void
08903e08 2888mcore_mark_dllexport (tree decl)
8f90be4c 2889{
cbd3488b 2890 const char * oldname;
8f90be4c
NC
2891 char * newname;
2892 rtx rtlname;
2893 tree idp;
2894
2895 rtlname = XEXP (DECL_RTL (decl), 0);
2896
2897 if (GET_CODE (rtlname) == MEM)
2898 rtlname = XEXP (rtlname, 0);
2899 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2900 oldname = XSTR (rtlname, 0);
2901
2902 if (mcore_dllexport_name_p (oldname))
2903 return; /* Already done. */
2904
5ead67f6 2905 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2906 sprintf (newname, "@e.%s", oldname);
2907
2908 /* We pass newname through get_identifier to ensure it has a unique
2909 address. RTL processing can sometimes peek inside the symbol ref
2910 and compare the string's addresses to see if two symbols are
2911 identical. */
2912 /* ??? At least I think that's why we do this. */
2913 idp = get_identifier (newname);
2914
2915 XEXP (DECL_RTL (decl), 0) =
f1c25d3b 2916 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2917}
2918
2919/* Mark a DECL as being dllimport'd. */
08903e08 2920
8f90be4c 2921static void
08903e08 2922mcore_mark_dllimport (tree decl)
8f90be4c 2923{
cbd3488b 2924 const char * oldname;
2925 char * newname;
2926 tree idp;
2927 rtx rtlname;
2928 rtx newrtl;
2929
2930 rtlname = XEXP (DECL_RTL (decl), 0);
2931
2932 if (GET_CODE (rtlname) == MEM)
2933 rtlname = XEXP (rtlname, 0);
2934 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2935 oldname = XSTR (rtlname, 0);
8f90be4c 2936
2937 gcc_assert (!mcore_dllexport_name_p (oldname));
2938 if (mcore_dllimport_name_p (oldname))
2939 return; /* Already done. */
2940
2941 /* ??? One can well ask why we're making these checks here,
2942 and that would be a good question. */
2943
2944 /* Imported variables can't be initialized. */
2945 if (TREE_CODE (decl) == VAR_DECL
2946 && !DECL_VIRTUAL_P (decl)
2947 && DECL_INITIAL (decl))
2948 {
dee15844 2949 error ("initialized variable %q+D is marked dllimport", decl);
2950 return;
2951 }
2952
2953 /* `extern' needn't be specified with dllimport.
2954 Specify `extern' now and hope for the best. Sigh. */
2955 if (TREE_CODE (decl) == VAR_DECL
2956 /* ??? Is this test for vtables needed? */
2957 && !DECL_VIRTUAL_P (decl))
2958 {
2959 DECL_EXTERNAL (decl) = 1;
2960 TREE_PUBLIC (decl) = 1;
2961 }
2962
5ead67f6 2963 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2964 sprintf (newname, "@i.__imp_%s", oldname);
2965
2966 /* We pass newname through get_identifier to ensure it has a unique
2967 address. RTL processing can sometimes peek inside the symbol ref
2968 and compare the string's addresses to see if two symbols are
2969 identical. */
2970 /* ??? At least I think that's why we do this. */
2971 idp = get_identifier (newname);
2972
2973 newrtl = gen_rtx_MEM (Pmode,
2974 gen_rtx_SYMBOL_REF (Pmode,
2975 IDENTIFIER_POINTER (idp)));
2976 XEXP (DECL_RTL (decl), 0) = newrtl;
2977}
2978
2979static int
08903e08 2980mcore_dllexport_p (tree decl)
2981{
2982 if ( TREE_CODE (decl) != VAR_DECL
2983 && TREE_CODE (decl) != FUNCTION_DECL)
2984 return 0;
2985
91d231cb 2986 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2987}
2988
2989static int
08903e08 2990mcore_dllimport_p (tree decl)
2991{
2992 if ( TREE_CODE (decl) != VAR_DECL
2993 && TREE_CODE (decl) != FUNCTION_DECL)
2994 return 0;
2995
91d231cb 2996 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2997}
2998
fb49053f 2999/* We must mark dll symbols specially. Definitions of dllexport'd objects
3000 install some info in the .drectve (PE) or .exports (ELF) sections. */
3001
3002static void
08903e08 3003mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
8f90be4c 3004{
3005 /* Mark the decl so we can tell from the rtl whether the object is
3006 dllexport'd or dllimport'd. */
3007 if (mcore_dllexport_p (decl))
3008 mcore_mark_dllexport (decl);
3009 else if (mcore_dllimport_p (decl))
3010 mcore_mark_dllimport (decl);
3011
3012 /* It might be that DECL has already been marked as dllimport, but
3013 a subsequent definition nullified that. The attribute is gone
3014 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3015 else if ((TREE_CODE (decl) == FUNCTION_DECL
3016 || TREE_CODE (decl) == VAR_DECL)
3017 && DECL_RTL (decl) != NULL_RTX
3018 && GET_CODE (DECL_RTL (decl)) == MEM
3019 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3020 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3021 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3022 {
3cce094d 3023 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
8f90be4c 3024 tree idp = get_identifier (oldname + 9);
f1c25d3b 3025 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3026
3027 XEXP (DECL_RTL (decl), 0) = newrtl;
3028
3029 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3030 ??? We leave these alone for now. */
3031 }
3032}
3033
3034/* Undo the effects of the above. */
3035
3036static const char *
08903e08 3037mcore_strip_name_encoding (const char * str)
3038{
3039 return str + (str[0] == '@' ? 3 : 0);
3040}
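/* Illustrative sketch (stand-alone, not part of the port) of the "@e." /
   "@i.__imp_" prefix scheme used above.  Note that stripping removes only
   the three-byte "@x." marker, so an imported name keeps its "__imp_"
   part; mcore_encode_section_info instead strips the full nine-byte
   "@i.__imp_" prefix when undoing an import marking. */
#if 0
#include <stdio.h>

static const char *
strip (const char *str)
{
  /* Same logic as mcore_strip_name_encoding above. */
  return str + (str[0] == '@' ? 3 : 0);
}

int
main (void)
{
  char exported[64], imported[64];

  sprintf (exported, "@e.%s", "foo");        /* dllexport marking */
  sprintf (imported, "@i.__imp_%s", "foo");  /* dllimport marking */

  printf ("%s -> %s\n", exported, strip (exported)); /* @e.foo -> foo */
  printf ("%s -> %s\n", imported, strip (imported)); /* -> __imp_foo */
  return 0;
}
#endif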
3041
3042/* MCore specific attribute support.
3043 dllexport - for exporting a function/variable that will live in a dll
3044 dllimport - for importing a function/variable from a dll
3045 naked - do not create a function prologue/epilogue. */
8f90be4c 3046
3047/* Handle a "naked" attribute; arguments as in
3048 struct attribute_spec.handler. */
08903e08 3049
91d231cb 3050static tree
3051mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3052 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
91d231cb 3053{
d45eae79 3054 if (TREE_CODE (*node) != FUNCTION_DECL)
91d231cb 3055 {
3056 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3057 name);
91d231cb 3058 *no_add_attrs = true;
3059 }
3060
91d231cb 3061 return NULL_TREE;
3062}
3063
3064/* ??? It looks like this is PE specific? Oh well, this is what the
3065 old code did as well. */
8f90be4c 3066
ae46c4e0 3067static void
08903e08 3068mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3069{
3070 int len;
0139adca 3071 const char * name;
8f90be4c 3072 char * string;
f27cd94d 3073 const char * prefix;
3074
3075 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3076
3077 /* Strip off any encoding in name. */
772c5265 3078 name = (* targetm.strip_name_encoding) (name);
3079
3080 /* The object is put in, for example, section .text$foo.
3081 The linker will then ultimately place them in .text
3082 (everything from the $ on is stripped). */
3083 if (TREE_CODE (decl) == FUNCTION_DECL)
3084 prefix = ".text$";
f710504c 3085 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3086 section might have relocs against it. */
4e4d733e 3087 else if (decl_readonly_section (decl, 0))
3088 prefix = ".rdata$";
3089 else
3090 prefix = ".data$";
3091
3092 len = strlen (name) + strlen (prefix);
5ead67f6 3093 string = XALLOCAVEC (char, len + 1);
3094
3095 sprintf (string, "%s%s", prefix, name);
3096
f961457f 3097 set_decl_section_name (decl, string);
3098}
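/* For example, a function foo lands in section ".text$foo" and a read-only
   variable bar in ".rdata$bar"; once the linker strips everything from the
   '$' onwards, they coalesce into .text and .rdata. */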
3099
3100int
08903e08 3101mcore_naked_function_p (void)
8f90be4c 3102{
91d231cb 3103 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3104}
7c262518 3105
3106static bool
3107mcore_warn_func_return (tree decl)
3108{
3109 /* Naked functions are implemented entirely in assembly, including the
3110 return sequence, so suppress warnings about this. */
3111 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3112}
3113
ede75ee8 3114#ifdef OBJECT_FORMAT_ELF
7c262518 3115static void
3116mcore_asm_named_section (const char *name,
3117 unsigned int flags ATTRIBUTE_UNUSED,
3118 tree decl ATTRIBUTE_UNUSED)
3119{
3120 fprintf (asm_out_file, "\t.section %s\n", name);
3121}
ede75ee8 3122#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3123
3124/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3125
3126static void
3127mcore_external_libcall (rtx fun)
3128{
3129 fprintf (asm_out_file, "\t.import\t");
3130 assemble_name (asm_out_file, XSTR (fun, 0));
3131 fprintf (asm_out_file, "\n");
3132}
3133
3134/* Worker function for TARGET_RETURN_IN_MEMORY. */
3135
09a2b93a 3136static bool
586de218 3137mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3138{
586de218 3139 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3140 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3141}
3142
3143/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3144 Output assembler code for a block containing the constant parts
3145 of a trampoline, leaving space for the variable parts.
3146
3147 On the MCore, the trampoline looks like:
3148 lrw r1, function
3149 lrw r13, area
3150 jmp r13
3151 or r0, r0
3152 .literals */
3153
3154static void
3155mcore_asm_trampoline_template (FILE *f)
3156{
3157 fprintf (f, "\t.short 0x7102\n");
3158 fprintf (f, "\t.short 0x7d02\n");
3159 fprintf (f, "\t.short 0x00cd\n");
3160 fprintf (f, "\t.short 0x1e00\n");
3161 fprintf (f, "\t.long 0\n");
3162 fprintf (f, "\t.long 0\n");
3163}
3164
3165/* Worker function for TARGET_TRAMPOLINE_INIT. */
3166
3167static void
3168mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3169{
3170 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3171 rtx mem;
3172
3173 emit_block_move (m_tramp, assemble_trampoline_template (),
3174 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3175
3176 mem = adjust_address (m_tramp, SImode, 8);
3177 emit_move_insn (mem, chain_value);
3178 mem = adjust_address (m_tramp, SImode, 12);
3179 emit_move_insn (mem, fnaddr);
3180}
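/* The finished 16-byte trampoline, as a sketch (opcodes from the template
   above; which literal each lrw fetches is fixed by the pc-relative
   displacements encoded in 0x7102/0x7d02):

     offset  0: 0x7102   lrw r1, <pc-relative literal>
     offset  2: 0x7d02   lrw r13, <pc-relative literal>
     offset  4: 0x00cd   jmp r13
     offset  6: 0x1e00   or r0, r0   (filler)
     offset  8: chain_value  (stored by mcore_trampoline_init)
     offset 12: fnaddr       (stored by mcore_trampoline_init)
*/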
3181
3182/* Implement TARGET_LEGITIMATE_CONSTANT_P
3183
3184 On the MCore, allow anything but a double. */
3185
3186static bool
3187mcore_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3188{
3189 return GET_CODE (x) != CONST_DOUBLE;
3190}