]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mcore/mcore.c
sparc.c (sparc_emit_probe_stack_range): Fix small inaccuracy in the probing code.
[thirdparty/gcc.git] / gcc / config / mcore / mcore.c
CommitLineData
8f90be4c 1/* Output routines for Motorola MCore processor
d1e082c2 2 Copyright (C) 1993-2013 Free Software Foundation, Inc.
8f90be4c 3
08903e08 4 This file is part of GCC.
8f90be4c 5
08903e08
SB
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
2f83c7d6 8 by the Free Software Foundation; either version 3, or (at your
08903e08 9 option) any later version.
8f90be4c 10
08903e08
SB
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
8f90be4c 15
08903e08 16 You should have received a copy of the GNU General Public License
2f83c7d6
NC
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
8f90be4c 19
bc27e96c 20#include "config.h"
4bd048ef 21#include "system.h"
4977bab6
ZW
22#include "coretypes.h"
23#include "tm.h"
4816b8e4
NC
24#include "rtl.h"
25#include "tree.h"
26#include "tm_p.h"
8f90be4c 27#include "mcore.h"
8f90be4c
NC
28#include "regs.h"
29#include "hard-reg-set.h"
8f90be4c
NC
30#include "insn-config.h"
31#include "conditions.h"
8f90be4c
NC
32#include "output.h"
33#include "insn-attr.h"
34#include "flags.h"
35#include "obstack.h"
36#include "expr.h"
37#include "reload.h"
38#include "recog.h"
39#include "function.h"
40#include "ggc.h"
718f9c0f 41#include "diagnostic-core.h"
672a6f42
NB
42#include "target.h"
43#include "target-def.h"
899cc0f4 44#include "df.h"
8f90be4c 45
8f90be4c
NC
46/* For dumping information about frame sizes. */
47char * mcore_current_function_name = 0;
48long mcore_current_compilation_timestamp = 0;
49
50/* Global variables for machine-dependent things. */
51
8f90be4c
NC
/* Provides the class number of the smallest class containing
   reg number.  Indexed by hard register number; regs 1-14 can only
   be loaded with lrw-class instructions, reg 1 additionally forms
   its own ONLYR1_REGS class, and the final entries cover the
   condition-code and fake registers.  */

const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS,	ONLYR1_REGS,  LRW_REGS,	    LRW_REGS,
  LRW_REGS,	LRW_REGS,     LRW_REGS,	    LRW_REGS,
  LRW_REGS,	LRW_REGS,     LRW_REGS,	    LRW_REGS,
  LRW_REGS,	LRW_REGS,     LRW_REGS,	    GENERAL_REGS,
  GENERAL_REGS,	C_REGS,	      NO_REGS,	    NO_REGS,
};
62
f27cd94d
NC
/* Description of a function's stack frame layout, filled in by
   layout_mcore_frame and consumed by the prologue/epilogue code.  */
struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;		/* Padding to align the outbound area.  */
  int pad_local;		/* Padding to align the locals.  */
  int pad_reg;			/* Padding to align the register save area.  */
  /* Describe the steps we'll use to grow it.  */
#define	MAX_STACK_GROWS	4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];	/* Size of each stack extension step.  */
  int arg_offset;		/* Offset of the stdarg spill area.  */
  int reg_offset;		/* Offset of the register save area.  */
  int reg_growth;		/* Growth step after which regs are saved.  */
  int local_growth;		/* Growth step after which locals exist.  */
};
81
/* Classification of instructions that can be conditionalized by the
   peephole-style pass in conditionalize_block/is_cond_candidate.  */
typedef enum
{
  COND_NO,			/* Cannot be made conditional.  */
  COND_MOV_INSN,		/* A register/immediate move.  */
  COND_CLR_INSN,		/* A clear (move of zero).  */
  COND_INC_INSN,		/* An increment.  */
  COND_DEC_INSN,		/* A decrement.  */
  COND_BRANCH_INSN		/* An unconditional branch.  */
}
cond_type;
92
08903e08
SB
93static void output_stack_adjust (int, int);
94static int calc_live_regs (int *);
6e3a343d 95static int try_constant_tricks (long, HOST_WIDE_INT *, HOST_WIDE_INT *);
08903e08 96static const char * output_inline_const (enum machine_mode, rtx *);
08903e08 97static void layout_mcore_frame (struct mcore_frame *);
d5cc9181 98static void mcore_setup_incoming_varargs (cumulative_args_t, enum machine_mode, tree, int *, int);
08903e08
SB
99static cond_type is_cond_candidate (rtx);
100static rtx emit_new_cond_insn (rtx, int);
101static rtx conditionalize_block (rtx);
102static void conditionalize_optimization (void);
103static void mcore_reorg (void);
586de218 104static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
08903e08
SB
105static void mcore_mark_dllexport (tree);
106static void mcore_mark_dllimport (tree);
107static int mcore_dllexport_p (tree);
108static int mcore_dllimport_p (tree);
08903e08 109static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
ede75ee8 110#ifdef OBJECT_FORMAT_ELF
08903e08 111static void mcore_asm_named_section (const char *,
c18a5b6c 112 unsigned int, tree);
ede75ee8 113#endif
349f851e
NF
114static void mcore_print_operand (FILE *, rtx, int);
115static void mcore_print_operand_address (FILE *, rtx);
116static bool mcore_print_operand_punct_valid_p (unsigned char code);
08903e08
SB
117static void mcore_unique_section (tree, int);
118static void mcore_encode_section_info (tree, rtx, int);
119static const char *mcore_strip_name_encoding (const char *);
120static int mcore_const_costs (rtx, RTX_CODE);
121static int mcore_and_cost (rtx);
122static int mcore_ior_cost (rtx);
68f932c4
RS
123static bool mcore_rtx_costs (rtx, int, int, int,
124 int *, bool);
09a2b93a 125static void mcore_external_libcall (rtx);
586de218 126static bool mcore_return_in_memory (const_tree, const_tree);
d5cc9181 127static int mcore_arg_partial_bytes (cumulative_args_t,
78a52f11
RH
128 enum machine_mode,
129 tree, bool);
d5cc9181 130static rtx mcore_function_arg (cumulative_args_t,
4665ac17
NF
131 enum machine_mode,
132 const_tree, bool);
d5cc9181 133static void mcore_function_arg_advance (cumulative_args_t,
4665ac17
NF
134 enum machine_mode,
135 const_tree, bool);
c2ed6cf8
NF
136static unsigned int mcore_function_arg_boundary (enum machine_mode,
137 const_tree);
71e0af3c
RH
138static void mcore_asm_trampoline_template (FILE *);
139static void mcore_trampoline_init (rtx, tree, rtx);
d45eae79 140static bool mcore_warn_func_return (tree);
c5387660 141static void mcore_option_override (void);
1a627b35 142static bool mcore_legitimate_constant_p (enum machine_mode, rtx);
5a82ecd9
ILT
143\f
/* MCore specific attributes.  dllexport/dllimport need no handler here
   (they are processed via the section-info hooks below); "naked"
   suppresses prologue/epilogue generation and is validated by
   mcore_handle_naked_attribute.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "dllexport", 0, 0, true,  false, false, NULL, false },
  { "dllimport", 0, 0, true,  false, false, NULL, false },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
    false },
  { NULL,        0, 0, false, false, false, NULL, false }
};
672a6f42
NB
156\f
157/* Initialize the GCC target structure. */
09a2b93a
KH
158#undef TARGET_ASM_EXTERNAL_LIBCALL
159#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
160
b2ca3702 161#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
08903e08
SB
162#undef TARGET_MERGE_DECL_ATTRIBUTES
163#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
672a6f42
NB
164#endif
165
301d03af 166#ifdef OBJECT_FORMAT_ELF
08903e08 167#undef TARGET_ASM_UNALIGNED_HI_OP
301d03af 168#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
08903e08 169#undef TARGET_ASM_UNALIGNED_SI_OP
301d03af
RS
170#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
171#endif
172
349f851e
NF
173#undef TARGET_PRINT_OPERAND
174#define TARGET_PRINT_OPERAND mcore_print_operand
175#undef TARGET_PRINT_OPERAND_ADDRESS
176#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
177#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
178#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
179
08903e08
SB
180#undef TARGET_ATTRIBUTE_TABLE
181#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
182#undef TARGET_ASM_UNIQUE_SECTION
183#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
ab5c8549
JJ
184#undef TARGET_ASM_FUNCTION_RODATA_SECTION
185#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
08903e08
SB
186#undef TARGET_ENCODE_SECTION_INFO
187#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
188#undef TARGET_STRIP_NAME_ENCODING
189#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
190#undef TARGET_RTX_COSTS
191#define TARGET_RTX_COSTS mcore_rtx_costs
192#undef TARGET_ADDRESS_COST
b413068c 193#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
08903e08
SB
194#undef TARGET_MACHINE_DEPENDENT_REORG
195#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
18dbd950 196
cde0f3fd
PB
197#undef TARGET_PROMOTE_FUNCTION_MODE
198#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
09a2b93a 199#undef TARGET_PROMOTE_PROTOTYPES
586de218 200#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
09a2b93a 201
09a2b93a
KH
202#undef TARGET_RETURN_IN_MEMORY
203#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
fe984136
RH
204#undef TARGET_MUST_PASS_IN_STACK
205#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
8cd5a4e0
RH
206#undef TARGET_PASS_BY_REFERENCE
207#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
78a52f11
RH
208#undef TARGET_ARG_PARTIAL_BYTES
209#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
4665ac17
NF
210#undef TARGET_FUNCTION_ARG
211#define TARGET_FUNCTION_ARG mcore_function_arg
212#undef TARGET_FUNCTION_ARG_ADVANCE
213#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
c2ed6cf8
NF
214#undef TARGET_FUNCTION_ARG_BOUNDARY
215#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
09a2b93a
KH
216
217#undef TARGET_SETUP_INCOMING_VARARGS
218#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
219
71e0af3c
RH
220#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
221#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
222#undef TARGET_TRAMPOLINE_INIT
223#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
224
c5387660
JM
225#undef TARGET_OPTION_OVERRIDE
226#define TARGET_OPTION_OVERRIDE mcore_option_override
fd02e833 227
1a627b35
RS
228#undef TARGET_LEGITIMATE_CONSTANT_P
229#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
230
d45eae79
SL
231#undef TARGET_WARN_FUNC_RETURN
232#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
233
f6897b10 234struct gcc_target targetm = TARGET_INITIALIZER;
f27cd94d 235\f
8f90be4c
NC
/* Adjust the stack pointer by SIZE bytes.  DIRECTION < 0 extends the
   stack (subtracts from sp), DIRECTION > 0 shrinks it (adds to sp).
   Emits the RTL; does not return a value despite the old comment.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  Each step writes
     sp back through a volatile MEM so the touched page is probed.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      /* Immediates above 32 don't fit addi/subi; load into r1 first.  */
      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
281
4816b8e4
NC
282/* Work out the registers which need to be saved,
283 both as a mask and a count. */
284
8f90be4c 285static int
08903e08 286calc_live_regs (int * count)
8f90be4c
NC
287{
288 int reg;
289 int live_regs_mask = 0;
290
291 * count = 0;
292
293 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
294 {
6fb5fa3c 295 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
8f90be4c
NC
296 {
297 (*count)++;
298 live_regs_mask |= (1 << reg);
299 }
300 }
301
302 return live_regs_mask;
303}
304
/* Print the operand address in X to STREAM.  Handles a plain register
   "(rN)", a register plus constant offset "(rN,imm)", and falls back to
   output_addr_const for symbolic addresses.  */

static void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO(base)], INTVAL (index));
	    break;

	  default:
	    /* Only reg+const is a valid MCore address here.  */
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
348
349f851e
NF
/* Return true if CODE is a punctuation character understood by
   mcore_print_operand ('.', '#', '*', '^' and '!').  */

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  switch (code)
    {
    case '.':
    case '#':
    case '*':
    case '^':
    case '!':
      return true;
    default:
      return false;
    }
}
355
8f90be4c
NC
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'N' print log2 of (value + 1), i.e. the bmaski width ("32" for -1)
   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register range for ldm/stm instruction (rN-rN+3)
   'x' print a constant in hex
   'X' print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      /* Mask to 32 bits so a sign-extended HOST_WIDE_INT still yields
	 the bit position within the word.  */
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      /* No modifier: print the operand according to its rtx code.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
433
/* What does a constant cost ?  EXP is a CONST_INT; CODE is the rtx code
   of the expression it appears in.  Returns a small relative cost:
   1 for directly encodable immediates, 2 when one extra instruction
   can synthesize it in context, else 5 (roughly an lrw).  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
459
/* What does an and instruction cost - we do this b/c immediates may
   have been relaxed.   We want to ensure that cse will cse relaxed immeds
   out.  Otherwise we'll get bad code (multiple reloads of the same const).
   X is an AND rtx; only its second operand (the mask) is examined.  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly (andi, or bclri for a single clear bit).  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
487
4816b8e4
NC
/* What does an or cost - see and_cost().  X is an IOR rtx; only its
   second operand (the mask) is examined.  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
513
/* Implement TARGET_RTX_COSTS.  Sets *TOTAL to the cost of rtx X
   appearing with outer code OUTER_CODE and returns true when the cost
   is final (no recursion into sub-expressions needed).  */

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* Symbolic operands always need a constant-pool load.  */
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      /* These are all out-of-line library calls; make them expensive.  */
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}
553
f90b7a5a
PB
/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands, i.e. use the inverted condition.
   Emits the SET of the CC register with the (possibly rewritten)
   comparison CODE of OP0 and OP1.

   NOTE: the case arms below deliberately fall through in pairs; each
   "inverted" case converts itself to its "normal" partner and drops
   into it.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode,
			  cc_reg,
			  gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
664
8f90be4c 665int
08903e08 666mcore_symbolic_address_p (rtx x)
8f90be4c
NC
667{
668 switch (GET_CODE (x))
669 {
670 case SYMBOL_REF:
671 case LABEL_REF:
672 return 1;
673 case CONST:
674 x = XEXP (x, 0);
675 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
676 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
677 && GET_CODE (XEXP (x, 1)) == CONST_INT);
678 default:
679 return 0;
680 }
681}
682
/* Functions to output assembly code for a function call.  OPERANDS is
   the insn's operand array and INDEX selects the call address operand.
   Returns the assembler template ("jsr" for a register target, "jbsr"
   for a symbolic one) in a static buffer — not reentrant.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  /* Register-indirect call: target unknown at compile time.  */
	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
719
720/* Can we load a constant with a single instruction ? */
4816b8e4 721
54d58eaf 722int
6e3a343d 723const_ok_for_mcore (HOST_WIDE_INT value)
8f90be4c
NC
724{
725 if (value >= 0 && value <= 127)
726 return 1;
727
728 /* Try exact power of two. */
6e3a343d 729 if (CONST_OK_FOR_M (value))
8f90be4c
NC
730 return 1;
731
14bc6742 732 /* Try exact power of two - 1. */
6e3a343d 733 if (CONST_OK_FOR_N (value) && value != -1)
8f90be4c
NC
734 return 1;
735
736 return 0;
737}
738
739/* Can we load a constant inline with up to 2 instructions ? */
4816b8e4 740
8f90be4c 741int
6e3a343d 742mcore_const_ok_for_inline (HOST_WIDE_INT value)
8f90be4c 743{
6e3a343d 744 HOST_WIDE_INT x, y;
8f90be4c
NC
745
746 return try_constant_tricks (value, & x, & y) > 0;
747}
748
749/* Are we loading the constant using a not ? */
4816b8e4 750
8f90be4c 751int
6e3a343d 752mcore_const_trick_uses_not (HOST_WIDE_INT value)
8f90be4c 753{
6e3a343d 754 HOST_WIDE_INT x, y;
8f90be4c
NC
755
756 return try_constant_tricks (value, & x, & y) == 2;
757}
758
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).  On success *X receives the constant to
   load with the first instruction and, for tricks 3-9, *Y receives the
   operand of the follow-up instruction.

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  /* Multi-insn synthesis is only worthwhile with hardware literals off.  */
  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  /* Small +/- adjustments: addi/subi take immediates 1..32.  */
  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      /* rsubi: value == i - loadable.  */
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      /* bseti: loadable with one bit cleared.  */
      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      /* bclri: loadable with one bit set.  */
      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;		/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  /* ixh (x + 2x) and ixw (x + 4x) multiply by 3 and 5.  */
  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
893
8f90be4c
NC
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
   can ignore subregs by extracting the actual register.  BRC
   Returns 1 if REG is provably dead after FIRST, 0 otherwise
   (conservatively assuming live on any doubt).  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
	{
	  /* Call's might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
942
/* Count the number of ones in mask.  MASK is treated as a 32-bit
   quantity (callers pass register/bit masks); the result is capped to
   the low byte by the final reduction step.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers:
     parallel pairwise sums of 1-, 2-, 4-, 8- then 16-bit fields.  */
  mask =   (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
956
4816b8e4
NC
957/* Count the number of zeros in mask. */
958
8f90be4c 959int
6e3a343d 960mcore_num_zeros (HOST_WIDE_INT mask)
8f90be4c
NC
961{
962 return 32 - mcore_num_ones (mask);
963}
964
/* Determine byte being masked.  Returns the index (0-3) of the byte
   that MASK leaves clear, or -1 if MASK is not a single-byte hole.  */

int
mcore_byte_offset (unsigned int mask)
{
  switch (mask)
    {
    case 0x00ffffffUL:
      return 0;
    case 0xff00ffffUL:
      return 1;
    case 0xffff00ffUL:
      return 2;
    case 0xffffff00UL:
      return 3;
    default:
      return -1;
    }
}
981
/* Determine halfword being masked.  Returns 0 or 1 for the halfword
   that MASK leaves clear, or -1 if MASK is not a halfword hole.  */

int
mcore_halfword_offset (unsigned int mask)
{
  switch (mask)
    {
    case 0x0000ffffUL:
      return 0;
    case 0xffff0000UL:
      return 1;
    default:
      return -1;
    }
}
994
995/* Output a series of bseti's corresponding to mask. */
4816b8e4 996
f27cd94d 997const char *
08903e08 998mcore_output_bseti (rtx dst, int mask)
8f90be4c
NC
999{
1000 rtx out_operands[2];
1001 int bit;
1002
1003 out_operands[0] = dst;
1004
1005 for (bit = 0; bit < 32; bit++)
1006 {
1007 if ((mask & 0x1) == 0x1)
1008 {
1009 out_operands[1] = GEN_INT (bit);
1010
1011 output_asm_insn ("bseti\t%0,%1", out_operands);
1012 }
1013 mask >>= 1;
1014 }
1015
1016 return "";
1017}
1018
1019/* Output a series of bclri's corresponding to mask. */
4816b8e4 1020
f27cd94d 1021const char *
08903e08 1022mcore_output_bclri (rtx dst, int mask)
8f90be4c
NC
1023{
1024 rtx out_operands[2];
1025 int bit;
1026
1027 out_operands[0] = dst;
1028
1029 for (bit = 0; bit < 32; bit++)
1030 {
1031 if ((mask & 0x1) == 0x0)
1032 {
1033 out_operands[1] = GEN_INT (bit);
1034
1035 output_asm_insn ("bclri\t%0,%1", out_operands);
1036 }
1037
1038 mask >>= 1;
1039 }
1040
1041 return "";
1042}
1043
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.
   OPERANDS[0] is the destination, OPERANDS[1]/[2] the two constants;
   CMP_T selects which constant goes with a true condition; TEST, if
   non-NULL, is an assembler template emitted first.  The md patterns
   guarantee at least one constant is loadable in one instruction.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment: inc/dec on the opposite condition
     from the one the load covered.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
1108
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  /* Only trick 2 ("load ~C, then andn") can be emitted here; the
     peephole condition must have ensured it applies.  */
  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  /* Pick the single instruction that can load X.  */
  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      /* try_constant_tricks returning 2 implies X is loadable in one
	 instruction, so this arm cannot be reached.  */
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
1150
/* Output an inline constant: load the single-instruction seed value X
   into the destination, then apply the fix-up operation selected by
   try_constant_tricks (not/add/sub/rsub/bseti/bclri/rotli/lsli/ixh/ixw)
   to reach VALUE.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  For big-endian DImode the
     low-order word is the second register, hence %R0.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  /* NOTE(review): the "%ld 0x%lx" comment formats assume
     HOST_WIDE_INT == long; on hosts where it is long long this
     mismatches -- the values only appear in an asm comment, but
     verify against HOST_WIDE_INT_PRINT usage elsewhere.  */
  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
1249
/* Output a move of a word or less value.  Chooses among register,
   memory, constant and literal-pool forms based on the operand codes
   and mode.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r*/
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case SImode:
		return "ldw\t%0,%1";
	      case HImode:
		return "ld.h\t%0,%1";
	      case QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  /* Prefer single-instruction immediates, then 1-2 insn
	     sequences, and fall back to the literal pool.  */
	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
	return "stw\t%1,%0";
      case HImode:
	return "st.h\t%1,%0";
      case QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}
1318
8f90be4c
NC
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov %R0,%R1\n\tmov %0,%1";
	  else
	    return "mov %0,%1\n\tmov %R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  /* Find the base register of the address so we can detect
	     the case where the first load clobbers it.  */
	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  /* Load the low word with the constant and the high word with
	     its sign extension (all-ones mask or zero).  Which register
	     holds the low word depends on endianness.  */
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski %R0,32";
	      else
		return "movi %R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski %0,32";
	      else
		return "movi %0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}
1419
1420/* Predicates used by the templates. */
1421
8f90be4c 1422int
08903e08 1423mcore_arith_S_operand (rtx op)
8f90be4c
NC
1424{
1425 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1426 return 1;
1427
1428 return 0;
1429}
1430
4816b8e4
NC
1431/* Expand insert bit field. BRC */
1432
8f90be4c 1433int
08903e08 1434mcore_expand_insv (rtx operands[])
8f90be4c
NC
1435{
1436 int width = INTVAL (operands[1]);
1437 int posn = INTVAL (operands[2]);
1438 int mask;
1439 rtx mreg, sreg, ereg;
1440
1441 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1442 for width==1 must be removed. Look around line 368. This is something
4816b8e4 1443 we really want the md part to do. */
8f90be4c
NC
1444 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1445 {
4816b8e4
NC
1446 /* Do directly with bseti or bclri. */
1447 /* RBE: 2/97 consider only low bit of constant. */
6e3a343d 1448 if ((INTVAL (operands[3]) & 1) == 0)
8f90be4c
NC
1449 {
1450 mask = ~(1 << posn);
f1c25d3b
KH
1451 emit_insn (gen_rtx_SET (SImode, operands[0],
1452 gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
8f90be4c
NC
1453 }
1454 else
1455 {
1456 mask = 1 << posn;
f1c25d3b
KH
1457 emit_insn (gen_rtx_SET (SImode, operands[0],
1458 gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
8f90be4c
NC
1459 }
1460
1461 return 1;
1462 }
1463
43a88a8c 1464 /* Look at some bit-field placements that we aren't interested
4816b8e4 1465 in handling ourselves, unless specifically directed to do so. */
8f90be4c
NC
1466 if (! TARGET_W_FIELD)
1467 return 0; /* Generally, give up about now. */
1468
1469 if (width == 8 && posn % 8 == 0)
1470 /* Byte sized and aligned; let caller break it up. */
1471 return 0;
1472
1473 if (width == 16 && posn % 16 == 0)
1474 /* Short sized and aligned; let caller break it up. */
1475 return 0;
1476
1477 /* The general case - we can do this a little bit better than what the
1478 machine independent part tries. This will get rid of all the subregs
1479 that mess up constant folding in combine when working with relaxed
4816b8e4 1480 immediates. */
8f90be4c
NC
1481
1482 /* If setting the entire field, do it directly. */
6e3a343d
NC
1483 if (GET_CODE (operands[3]) == CONST_INT
1484 && INTVAL (operands[3]) == ((1 << width) - 1))
8f90be4c
NC
1485 {
1486 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
f1c25d3b
KH
1487 emit_insn (gen_rtx_SET (SImode, operands[0],
1488 gen_rtx_IOR (SImode, operands[0], mreg)));
8f90be4c
NC
1489 return 1;
1490 }
1491
1492 /* Generate the clear mask. */
1493 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1494
1495 /* Clear the field, to overlay it later with the source. */
f1c25d3b
KH
1496 emit_insn (gen_rtx_SET (SImode, operands[0],
1497 gen_rtx_AND (SImode, operands[0], mreg)));
8f90be4c
NC
1498
1499 /* If the source is constant 0, we've nothing to add back. */
1500 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1501 return 1;
1502
1503 /* XXX: Should we worry about more games with constant values?
1504 We've covered the high profile: set/clear single-bit and many-bit
1505 fields. How often do we see "arbitrary bit pattern" constants? */
1506 sreg = copy_to_mode_reg (SImode, operands[3]);
1507
1508 /* Extract src as same width as dst (needed for signed values). We
1509 always have to do this since we widen everything to SImode.
1510 We don't have to mask if we're shifting this up against the
1511 MSB of the register (e.g., the shift will push out any hi-order
4816b8e4 1512 bits. */
f27cd94d 1513 if (width + posn != (int) GET_MODE_SIZE (SImode))
8f90be4c
NC
1514 {
1515 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
f1c25d3b
KH
1516 emit_insn (gen_rtx_SET (SImode, sreg,
1517 gen_rtx_AND (SImode, sreg, ereg)));
8f90be4c
NC
1518 }
1519
4816b8e4 1520 /* Insert source value in dest. */
8f90be4c 1521 if (posn != 0)
f1c25d3b
KH
1522 emit_insn (gen_rtx_SET (SImode, sreg,
1523 gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
8f90be4c 1524
f1c25d3b
KH
1525 emit_insn (gen_rtx_SET (SImode, operands[0],
1526 gen_rtx_IOR (SImode, operands[0], sreg)));
8f90be4c
NC
1527
1528 return 1;
1529}
8f90be4c
NC
1530\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

/* Widest machine mode usable for a chunk of a given byte alignment;
   indexed by the alignment (1, 2 or 4).  Indices 0 and 3 are invalid
   and hold VOIDmode.  */
static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};
1550
/* Emit a load/store sequence copying SIZE bytes from SRC_MEM to DST_MEM
   using chunks no wider than ALIGN bytes.  Loads and stores are
   software-pipelined in two phases so a load of the next chunk is
   issued before the store of the previous one.  */
static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  /* Force both addresses into registers so adjust_address can form
     reg+offset addresses below.  */
  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  /* Widest chunk allowed by both the remaining size and the
	     alignment.  */
	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  /* Load the next chunk into a fresh temporary.  */
	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  /* Store the chunk loaded on the previous iteration.  */
	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
1616
88042663
RH
1617bool
1618mcore_expand_block_move (rtx *operands)
8f90be4c 1619{
88042663
RH
1620 HOST_WIDE_INT align, bytes, max;
1621
1622 if (GET_CODE (operands[2]) != CONST_INT)
1623 return false;
1624
1625 bytes = INTVAL (operands[2]);
1626 align = INTVAL (operands[3]);
8f90be4c 1627
88042663
RH
1628 if (bytes <= 0)
1629 return false;
1630 if (align > 4)
1631 align = 4;
1632
1633 switch (align)
8f90be4c 1634 {
88042663
RH
1635 case 4:
1636 if (bytes & 1)
1637 max = 4*4;
1638 else if (bytes & 3)
1639 max = 8*4;
1640 else
1641 max = 16*4;
1642 break;
1643 case 2:
1644 max = 4*2;
1645 break;
1646 case 1:
1647 max = 4*1;
1648 break;
1649 default:
6e1f65b5 1650 gcc_unreachable ();
88042663
RH
1651 }
1652
1653 if (bytes <= max)
1654 {
1655 block_move_sequence (operands[0], operands[1], bytes, align);
1656 return true;
8f90be4c
NC
1657 }
1658
88042663 1659 return false;
8f90be4c
NC
1660}
1661\f
1662
/* Code to generate prologue and epilogue sequences.  */

/* Number of argument registers consumed before the anonymous (...)
   arguments begin; the prologue spills the remaining ones.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define	ADDI_REACH (32)		/* Maximum addi operand.  */
8f90be4c 1673
/* Compute the layout of the current function's stack frame into *INFP:
   the sizes of the argument-spill, register-save, locals and outbound
   areas, the padding needed to keep STACK_BYTES alignment, the offsets
   of the register and argument areas, and the staged schedule of stack
   adjustments (growth[]) that the prologue will perform.  Several
   strategies are tried in order, each ending at 'finish'.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      /* Fold the outbound area into the same single adjustment when it
	 still fits within addi reach and no frame pointer intervenes.  */
      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
	{
	  step += outbounds;
	  infp->reg_offset += outbounds;
	  outbounds = 0;
	}

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
	step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
	{
	  all += outbounds;
	  outbounds = 0;
	}

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      gcc_assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
	{
	  step += outbounds;
	  outbounds = 0;
	}

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
	use some of localsize so that regarg is aligned and then
	save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?, plus a few consistency checks.  */
 finish:
  gcc_assert (infp->reg_offset >= 0);
  gcc_assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}
1885
1886/* Define the offset between two registers, one to be eliminated, and
1887 the other its replacement, at the start of a routine. */
4816b8e4 1888
8f90be4c 1889int
08903e08 1890mcore_initial_elimination_offset (int from, int to)
8f90be4c
NC
1891{
1892 int above_frame;
1893 int below_frame;
1894 struct mcore_frame fi;
1895
1896 layout_mcore_frame (& fi);
1897
1898 /* fp to ap */
1899 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1900 /* sp to fp */
1901 below_frame = fi.outbound_size + fi.pad_outbound;
1902
1903 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1904 return above_frame;
1905
1906 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1907 return above_frame + below_frame;
1908
1909 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1910 return below_frame;
1911
6e1f65b5 1912 gcc_unreachable ();
8f90be4c
NC
1913}
1914
4816b8e4
NC
/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
			      enum machine_mode mode, tree type,
			      int * ptr_pretend_size ATTRIBUTE_UNUSED,
			      int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);

  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.
     (This deliberately overwrites the value computed above.)  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}
1942
/* Emit the function prologue: optionally emit call-graph annotation
   data, then perform the staged stack adjustments computed by
   layout_mcore_frame, spill varargs registers, save live callee-saved
   registers (with stm where profitable), and set up the frame
   pointer.  */
void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
	ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
	 We're looking at how the 8byte alignment affects stack layout
	 and where we had to pad things.  This emits information we can
	 extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
	       "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
	       mcore_current_function_name,
	       fi.arg_size, fi.reg_size, fi.reg_mask,
	       fi.local_size, fi.outbound_size,
	       frame_pointer_needed);
    }

  /* Naked functions get no prologue at all.  */
  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      /* Spill the anonymous argument registers, highest first, walking
	 down from arg_offset in word steps.  */
      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
	{
	  emit_insn (gen_movsi
		     (gen_rtx_MEM (SImode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offset)),
		      gen_rtx_REG (SImode, rn)));
	}
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
	{
	  if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	    {
	      /* A contiguous run of registers ending at r15 saved at
		 offset 0: use a single store-multiple.  */
	      int first_reg = 15;

	      while (fi.reg_mask & (1 << first_reg))
		first_reg--;
	      first_reg++;

	      emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
					     gen_rtx_REG (SImode, first_reg),
					     GEN_INT (16 - first_reg)));

	      i -= (15 - first_reg);
	      offs += (16 - first_reg) * 4;
	    }
	  else if (fi.reg_mask & (1 << i))
	    {
	      emit_insn (gen_movsi
			 (gen_rtx_MEM (SImode,
				       plus_constant (Pmode, stack_pointer_rtx,
						      offs)),
			  gen_rtx_REG (SImode, i)));
	      offs += 4;
	    }
	}
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
}
2074
/* Expand the epilogue for the current function.  Restores the stack
   pointer (either in one step from the frame pointer, or by popping the
   frame piecewise through fi.growth[]), reloads any call-saved registers
   recorded by layout_mcore_frame, and releases the remaining stack.
   This undoes, in reverse order, the adjustments made by the prologue.
   Emits nothing for naked functions.  */

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  /* Index into fi.growth[]; the epilogue walks the growth records
     backwards, from the last adjustment to the first.  */
  int growth = MAX_STACK_GROWS - 1 ;


  /* Find out what we're doing.  */
  layout_mcore_frame(&fi);

  /* A naked function manages its own prologue/epilogue.  */
  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      /* The adjustments below fi.local_growth were absorbed into the
	 frame pointer, so resume popping just below that point.  */
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
	{
	  if (fi.growth[growth] != 0)
	    output_stack_adjust (1, fi.growth[growth]);
	  growth--;
	}
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down. This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      /* If r15 and r14 are both saved and the save area starts at the
	 stack pointer, reload a contiguous run of high registers with a
	 single ldm; mirrors the stm case in the prologue.  */
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	{
	  int first_reg;

	  /* Find the starting register.  */
	  first_reg = 15;

	  while (fi.reg_mask & (1 << first_reg))
	    first_reg--;

	  first_reg++;

	  emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
					gen_rtx_MEM (SImode, stack_pointer_rtx),
					GEN_INT (16 - first_reg)));

	  /* Skip over the registers the ldm just handled.  */
	  i -= (15 - first_reg);
	  offs += (16 - first_reg) * 4;
	}
      else if (fi.reg_mask & (1 << i))
	{
	  /* Reload a single saved register from its slot.  */
	  emit_insn (gen_movsi
		     (gen_rtx_REG (SImode, i),
		      gen_rtx_MEM (SImode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offs))));
	  offs += 4;
	}
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}
2152\f
2153/* This code is borrowed from the SH port. */
2154
2155/* The MCORE cannot load a large constant into a register, constants have to
2156 come from a pc relative load. The reference of a pc relative load
0fa2e4df 2157 instruction must be less than 1k in front of the instruction. This
8f90be4c
NC
2158 means that we often have to dump a constant inside a function, and
2159 generate code to branch around it.
2160
2161 It is important to minimize this, since the branches will slow things
2162 down and make things bigger.
2163
2164 Worst case code looks like:
2165
2166 lrw L1,r0
2167 br L2
2168 align
2169 L1: .long value
2170 L2:
2171 ..
2172
2173 lrw L3,r0
2174 br L4
2175 align
2176 L3: .long value
2177 L4:
2178 ..
2179
2180 We fix this by performing a scan before scheduling, which notices which
2181 instructions need to have their operands fetched from the constant table
2182 and builds the table.
2183
2184 The algorithm is:
2185
2186 scan, find an instruction which needs a pcrel move. Look forward, find the
2187 last barrier which is within MAX_COUNT bytes of the requirement.
2188 If there isn't one, make one. Process all the instructions between
2189 the find and the barrier.
2190
2191 In the above example, we can tell that L3 is within 1k of L1, so
2192 the first move can be shrunk from the 2 insn+constant sequence into
2193 just 1 insn, and the constant moved to L3 to make:
2194
2195 lrw L1,r0
2196 ..
2197 lrw L3,r0
2198 bra L4
2199 align
2200 L3:.long value
2201 L4:.long value
2202
2203 Then the second move becomes the target for the shortening process. */
2204
/* One entry in the pending constant pool: the constant itself and the
   CODE_LABEL that will address it in the output.  */

typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)

/* Entries accumulated for the current pool and the number of them in
   use.  Drained (and pool_size reset) by mcore_output_jump_label_table.  */
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
2220
2221/* Dump out any constants accumulated in the final pass. These
2222 will only be labels. */
4816b8e4 2223
f27cd94d 2224const char *
08903e08 2225mcore_output_jump_label_table (void)
8f90be4c
NC
2226{
2227 int i;
2228
2229 if (pool_size)
2230 {
2231 fprintf (asm_out_file, "\t.align 2\n");
2232
2233 for (i = 0; i < pool_size; i++)
2234 {
2235 pool_node * p = pool_vector + i;
2236
4977bab6 2237 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
8f90be4c
NC
2238
2239 output_asm_insn (".long %0", &p->value);
2240 }
2241
2242 pool_size = 0;
2243 }
2244
2245 return "";
2246}
2247
8f90be4c 2248/* Check whether insn is a candidate for a conditional. */
4816b8e4 2249
8f90be4c 2250static cond_type
08903e08 2251is_cond_candidate (rtx insn)
8f90be4c
NC
2252{
2253 /* The only things we conditionalize are those that can be directly
2254 changed into a conditional. Only bother with SImode items. If
2255 we wanted to be a little more aggressive, we could also do other
4816b8e4 2256 modes such as DImode with reg-reg move or load 0. */
8f90be4c
NC
2257 if (GET_CODE (insn) == INSN)
2258 {
2259 rtx pat = PATTERN (insn);
2260 rtx src, dst;
2261
2262 if (GET_CODE (pat) != SET)
2263 return COND_NO;
2264
2265 dst = XEXP (pat, 0);
2266
2267 if ((GET_CODE (dst) != REG &&
2268 GET_CODE (dst) != SUBREG) ||
2269 GET_MODE (dst) != SImode)
2270 return COND_NO;
2271
2272 src = XEXP (pat, 1);
2273
2274 if ((GET_CODE (src) == REG ||
2275 (GET_CODE (src) == SUBREG &&
2276 GET_CODE (SUBREG_REG (src)) == REG)) &&
2277 GET_MODE (src) == SImode)
2278 return COND_MOV_INSN;
2279 else if (GET_CODE (src) == CONST_INT &&
2280 INTVAL (src) == 0)
2281 return COND_CLR_INSN;
2282 else if (GET_CODE (src) == PLUS &&
2283 (GET_CODE (XEXP (src, 0)) == REG ||
2284 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2285 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2286 GET_MODE (XEXP (src, 0)) == SImode &&
2287 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2288 INTVAL (XEXP (src, 1)) == 1)
2289 return COND_INC_INSN;
2290 else if (((GET_CODE (src) == MINUS &&
2291 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2292 INTVAL( XEXP (src, 1)) == 1) ||
2293 (GET_CODE (src) == PLUS &&
2294 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2295 INTVAL (XEXP (src, 1)) == -1)) &&
2296 (GET_CODE (XEXP (src, 0)) == REG ||
2297 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2298 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2299 GET_MODE (XEXP (src, 0)) == SImode)
2300 return COND_DEC_INSN;
2301
14bc6742 2302 /* Some insns that we don't bother with:
8f90be4c
NC
2303 (set (rx:DI) (ry:DI))
2304 (set (rx:DI) (const_int 0))
2305 */
2306
2307 }
2308 else if (GET_CODE (insn) == JUMP_INSN &&
2309 GET_CODE (PATTERN (insn)) == SET &&
2310 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2311 return COND_BRANCH_INSN;
2312
2313 return COND_NO;
2314}
2315
/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  COND nonzero selects the
   "condition true" variant of the instruction, zero the "condition
   false" variant.  Returns NULL if INSN is not conditionalizable.  */

static rtx
emit_new_cond_insn (rtx insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      /* For a jump, "dst" is the target label; there is no source.  */
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  /* Build the conditional replacement pattern.  */
  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
	c_insn = gen_movt0 (dst, src, dst);
      else
	c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
	c_insn = gen_incscc (dst, dst);
      else
	c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
	c_insn = gen_decscc (dst, dst);
      else
	c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
	c_insn = gen_branch_true (dst);
      else
	c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
	 point, but go ahead and save the notes.  This will help is_dead()
	 when applying peepholes (links don't matter since they are not
	 used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
	 deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}
2406
/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insn in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

   bt	L2             <-- end of block 1 (delete)
   mov	r7,r8
   addu	r7,1
   br	L3             <-- end of block 2

   L2:	...            <-- start of block 3 (NUSES==1)
   L3:	...

   to:

   movf	r7,r8
   incf	r7
   bf	L3

   L3:	...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC

   FIRST is the insn at which to start scanning.  Returns the insn at
   which the caller should resume scanning (or NULL at end of stream).  */

static rtx
conditionalize_block (rtx first)
{
  rtx insn;
  rtx br_pat;
  rtx end_blk_1_br = 0;
  rtx end_blk_2_insn = 0;
  rtx start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;


  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (GET_CODE (first) != JUMP_INSN ||
      GET_CODE (PATTERN (first)) != SET ||
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
	 and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
	break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
	 insn is not conditionalizable or makes this optimization fail,
	 just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
	return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
	{
	  blk_size ++;
	  end_blk_2_insn = insn;
	}
    }

  /* Ran off the end of the insn stream without finding block 3.  */
  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     the best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.   Also, all
     instructions in the block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx newinsn;

      if (INSN_DELETED_P (insn))
	continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
	{
	  if (end_blk_2_insn == insn)
	    end_blk_2_insn = newinsn;

	  insn = newinsn;
	}
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? we probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}
2567
2568/* Apply the conditionalization of blocks optimization. This is the
2569 outer loop that traverses through the insns scanning for a branch
2570 that signifies an opportunity to apply the optimization. Note that
2571 this optimization is applied late. If we could apply it earlier,
2572 say before cse 2, it may expose more optimization opportunities.
2573 but, the pay back probably isn't really worth the effort (we'd have
2574 to update all reg/flow/notes/links/etc to make it work - and stick it
4816b8e4
NC
2575 in before cse 2). */
2576
8f90be4c 2577static void
08903e08 2578conditionalize_optimization (void)
8f90be4c
NC
2579{
2580 rtx insn;
2581
18dbd950 2582 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
8f90be4c
NC
2583 continue;
2584}
2585
18dbd950 2586/* This is to handle loads from the constant pool. */
4816b8e4 2587
18dbd950 2588static void
08903e08 2589mcore_reorg (void)
8f90be4c
NC
2590{
2591 /* Reset this variable. */
2592 current_function_anonymous_args = 0;
2593
8f90be4c
NC
2594 if (optimize == 0)
2595 return;
2596
2597 /* Conditionalize blocks where we can. */
18dbd950 2598 conditionalize_optimization ();
8f90be4c
NC
2599
2600 /* Literal pool generation is now pushed off until the assembler. */
2601}
2602
2603\f
f0f4da32 2604/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2605
f0f4da32 2606bool
08903e08 2607mcore_r15_operand_p (rtx x)
f0f4da32
RS
2608{
2609 switch (GET_CODE (x))
2610 {
2611 case CONST_INT:
2612 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2613
f0f4da32
RS
2614 case REG:
2615 case SUBREG:
2616 case MEM:
2617 return 1;
2618
2619 default:
2620 return 0;
2621 }
2622}
2623
0a2aaacc 2624/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2625 directly move X into it, use r1-r14 as a temporary. */
08903e08 2626
f0f4da32 2627enum reg_class
0a2aaacc 2628mcore_secondary_reload_class (enum reg_class rclass,
08903e08 2629 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2630{
0a2aaacc 2631 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
f0f4da32
RS
2632 && !mcore_r15_operand_p (x))
2633 return LRW_REGS;
2634 return NO_REGS;
2635}
8f90be4c 2636
f0f4da32 2637/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2638 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2639 use LRW_REGS instead. */
08903e08 2640
8f90be4c 2641enum reg_class
0a2aaacc 2642mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2643{
0a2aaacc 2644 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2645 return LRW_REGS;
8f90be4c 2646
0a2aaacc 2647 return rclass;
8f90be4c
NC
2648}
2649
2650/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2651 register. Note that the current version doesn't worry about whether
2652 they are the same mode or note (e.g., a QImode in r2 matches an HImode
2653 in r2 matches an SImode in r2. Might think in the future about whether
2654 we want to be able to say something about modes. */
08903e08 2655
8f90be4c 2656int
08903e08 2657mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2658{
14bc6742 2659 /* Strip any and all of the subreg wrappers. */
8f90be4c
NC
2660 while (GET_CODE (x) == SUBREG)
2661 x = SUBREG_REG (x);
2662
2663 while (GET_CODE (y) == SUBREG)
2664 y = SUBREG_REG (y);
2665
2666 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2667 return 1;
2668
2669 return 0;
2670}
2671
c5387660
JM
/* Validate the target-specific option flags after parsing.  */

static void
mcore_option_override (void)
{
  /* Only the m340 supports little endian code, so requesting little
     endian output implicitly selects the m340.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}
fac0f722 2679
8f90be4c 2680\f
8f90be4c
NC
2681/* Compute the number of word sized registers needed to
2682 hold a function argument of mode MODE and type TYPE. */
08903e08 2683
8f90be4c 2684int
586de218 2685mcore_num_arg_regs (enum machine_mode mode, const_tree type)
8f90be4c
NC
2686{
2687 int size;
2688
fe984136 2689 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2690 return 0;
2691
2692 if (type && mode == BLKmode)
2693 size = int_size_in_bytes (type);
2694 else
2695 size = GET_MODE_SIZE (mode);
2696
2697 return ROUND_ADVANCE (size);
2698}
2699
/* Return the RTL describing how an argument (or return value) of mode
   MODE and type TYPE is passed starting at argument register REG.
   Normally a plain REG; a PARALLEL for the odd-sized aggregate case
   described below.  */

static rtx
handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      /* Build one (reg, byte-offset) pair per word of the aggregate.  */
      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
	{
	  arg_regs [nregs] =
	    gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
			       GEN_INT (nregs * UNITS_PER_WORD));
	  nregs ++;
	}

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      gcc_assert (ARRAY_SIZE (arg_regs) == 6);
      /* gen_rtvec only consumes the first NREGS of the six expressions
	 passed; the remainder are ignored.  */
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
			  arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  /* The common case: the whole value lives in a single register run
     starting at REG.  */
  return gen_rtx_REG (mode, reg);
}
2743
2744rtx
cde0f3fd 2745mcore_function_value (const_tree valtype, const_tree func)
8f90be4c
NC
2746{
2747 enum machine_mode mode;
2748 int unsigned_p;
2749
2750 mode = TYPE_MODE (valtype);
2751
cde0f3fd 2752 /* Since we promote return types, we must promote the mode here too. */
71e0af3c 2753 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
8f90be4c
NC
2754
2755 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2756}
2757
2758/* Define where to put the arguments to a function.
2759 Value is zero to push the argument on the stack,
2760 or a hard register in which to store the argument.
2761
2762 MODE is the argument's machine mode.
2763 TYPE is the data type of the argument (as a tree).
2764 This is null for libcalls where that information may
2765 not be available.
2766 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2767 the preceding args and about the function being called.
2768 NAMED is nonzero if this argument is a named parameter
2769 (otherwise it is an extra parameter matching an ellipsis).
2770
2771 On MCore the first args are normally in registers
2772 and the rest are pushed. Any arg that starts within the first
2773 NPARM_REGS words is at least partially passed in a register unless
2774 its data type forbids. */
08903e08 2775
4665ac17 2776static rtx
d5cc9181 2777mcore_function_arg (cumulative_args_t cum, enum machine_mode mode,
4665ac17 2778 const_tree type, bool named)
8f90be4c
NC
2779{
2780 int arg_reg;
2781
88042663 2782 if (! named || mode == VOIDmode)
8f90be4c
NC
2783 return 0;
2784
fe984136 2785 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2786 return 0;
2787
d5cc9181 2788 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
8f90be4c
NC
2789
2790 if (arg_reg < NPARM_REGS)
2791 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2792
2793 return 0;
2794}
2795
4665ac17 2796static void
d5cc9181 2797mcore_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4665ac17
NF
2798 const_tree type, bool named ATTRIBUTE_UNUSED)
2799{
d5cc9181
JR
2800 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2801
4665ac17
NF
2802 *cum = (ROUND_REG (*cum, mode)
2803 + (int)named * mcore_num_arg_regs (mode, type));
2804}
2805
c2ed6cf8
NF
2806static unsigned int
2807mcore_function_arg_boundary (enum machine_mode mode,
2808 const_tree type ATTRIBUTE_UNUSED)
2809{
2810 /* Doubles must be aligned to an 8 byte boundary. */
2811 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2812 ? BIGGEST_ALIGNMENT
2813 : PARM_BOUNDARY);
2814}
2815
78a52f11
RH
2816/* Returns the number of bytes of argument registers required to hold *part*
2817 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dab66575 2818 the type is not known). If the argument fits entirely in the argument
8f90be4c
NC
2819 registers, or entirely on the stack, then 0 is returned. CUM is the
2820 number of argument registers already used by earlier parameters to
2821 the function. */
08903e08 2822
78a52f11 2823static int
d5cc9181 2824mcore_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
78a52f11 2825 tree type, bool named)
8f90be4c 2826{
d5cc9181 2827 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
8f90be4c
NC
2828
2829 if (named == 0)
2830 return 0;
2831
fe984136 2832 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2833 return 0;
2834
2835 /* REG is not the *hardware* register number of the register that holds
2836 the argument, it is the *argument* register number. So for example,
2837 the first argument to a function goes in argument register 0, which
2838 translates (for the MCore) into hardware register 2. The second
2839 argument goes into argument register 1, which translates into hardware
2840 register 3, and so on. NPARM_REGS is the number of argument registers
2841 supported by the target, not the maximum hardware register number of
2842 the target. */
2843 if (reg >= NPARM_REGS)
2844 return 0;
2845
2846 /* If the argument fits entirely in registers, return 0. */
2847 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2848 return 0;
2849
2850 /* The argument overflows the number of available argument registers.
2851 Compute how many argument registers have not yet been assigned to
2852 hold an argument. */
2853 reg = NPARM_REGS - reg;
2854
2855 /* Return partially in registers and partially on the stack. */
78a52f11 2856 return reg * UNITS_PER_WORD;
8f90be4c
NC
2857}
2858\f
/* Return nonzero if SYMBOL is marked as being dllexport'd, i.e. it
   carries the "@e." prefix applied by mcore_mark_dllexport.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return strncmp (symbol, "@e.", 3) == 0;
}
2866
/* Return nonzero if SYMBOL is marked as being dllimport'd, i.e. it
   carries the "@i." prefix applied by mcore_mark_dllimport.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return strncmp (symbol, "@i.", 3) == 0;
}
2874
/* Mark a DECL as being dllexport'd by prefixing its assembler name
   with "@e." (the prefix recognized by mcore_dllexport_name_p) and
   replacing the SYMBOL_REF in DECL_RTL accordingly.  Idempotent.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  /* The name may be wrapped in a MEM; dig down to the SYMBOL_REF.  */
  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  if (mcore_dllexport_name_p (oldname))
    return;  /* Already done.  */

  /* 4 = strlen ("@e.") + trailing NUL.  */
  newname = XALLOCAVEC (char, strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
}
2908
/* Mark a DECL as being dllimport'd: rewrite its assembler name to
   "@i.__imp_<name>" (the prefix recognized by mcore_dllimport_name_p)
   and wrap the SYMBOL_REF in a MEM, since an imported symbol is
   reached through the import pointer.  Idempotent; must not be called
   on a symbol already marked dllexport.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  /* The name may already be wrapped in a MEM; dig down to the
     SYMBOL_REF.  */
  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  gcc_assert (!mcore_dllexport_name_p (oldname));
  if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("initialized variable %q+D is marked dllimport", decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  /* 11 = strlen ("@i.__imp_") + trailing NUL + 1 spare.  */
  newname = XALLOCAVEC (char, strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  /* Wrap the renamed symbol in a MEM: references go through the
     import pointer.  */
  newrtl = gen_rtx_MEM (Pmode,
			gen_rtx_SYMBOL_REF (Pmode,
					    IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}
2968
2969static int
08903e08 2970mcore_dllexport_p (tree decl)
8f90be4c
NC
2971{
2972 if ( TREE_CODE (decl) != VAR_DECL
2973 && TREE_CODE (decl) != FUNCTION_DECL)
2974 return 0;
2975
91d231cb 2976 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
2977}
2978
2979static int
08903e08 2980mcore_dllimport_p (tree decl)
8f90be4c
NC
2981{
2982 if ( TREE_CODE (decl) != VAR_DECL
2983 && TREE_CODE (decl) != FUNCTION_DECL)
2984 return 0;
2985
91d231cb 2986 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
2987}
2988
/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drective (PE) or .exports (ELF) sections.
   Applies the "@e."/"@i." name encodings according to DECL's
   dllexport/dllimport attributes, and strips a stale "@i.__imp_"
   encoding when the dllimport attribute has since been removed.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
	    || TREE_CODE (decl) == VAR_DECL)
	   && DECL_RTL (decl) != NULL_RTX
	   && GET_CODE (DECL_RTL (decl)) == MEM
	   && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
	   && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
	   && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      /* + 9 skips the "@i.__imp_" prefix, recovering the bare name.  */
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
	 ??? We leave these alone for now.  */
    }
}
3023
772c5265
RH
/* Undo the effects of the above: drop the three-character "@x."
   marker from an encoded symbol name, if present.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  if (str[0] == '@')
    return str + 3;

  return str;
}
3031
8f90be4c
NC
3032/* MCore specific attribute support.
3033 dllexport - for exporting a function/variable that will live in a dll
3034 dllimport - for importing a function/variable from a dll
3035 naked - do not create a function prologue/epilogue. */
8f90be4c 3036
91d231cb
JM
3037/* Handle a "naked" attribute; arguments as in
3038 struct attribute_spec.handler. */
08903e08 3039
91d231cb 3040static tree
08903e08
SB
3041mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3042 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
91d231cb 3043{
d45eae79 3044 if (TREE_CODE (*node) != FUNCTION_DECL)
91d231cb 3045 {
29d08eba
JM
3046 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3047 name);
91d231cb 3048 *no_add_attrs = true;
8f90be4c
NC
3049 }
3050
91d231cb 3051 return NULL_TREE;
8f90be4c
NC
3052}
3053
ae46c4e0
RH
3054/* ??? It looks like this is PE specific? Oh well, this is what the
3055 old code did as well. */
8f90be4c 3056
ae46c4e0 3057static void
08903e08 3058mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
8f90be4c
NC
3059{
3060 int len;
0139adca 3061 const char * name;
8f90be4c 3062 char * string;
f27cd94d 3063 const char * prefix;
8f90be4c
NC
3064
3065 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3066
3067 /* Strip off any encoding in name. */
772c5265 3068 name = (* targetm.strip_name_encoding) (name);
8f90be4c
NC
3069
3070 /* The object is put in, for example, section .text$foo.
3071 The linker will then ultimately place them in .text
3072 (everything from the $ on is stripped). */
3073 if (TREE_CODE (decl) == FUNCTION_DECL)
3074 prefix = ".text$";
f710504c 3075 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3076 section might have relocs against it. */
4e4d733e 3077 else if (decl_readonly_section (decl, 0))
8f90be4c
NC
3078 prefix = ".rdata$";
3079 else
3080 prefix = ".data$";
3081
3082 len = strlen (name) + strlen (prefix);
5ead67f6 3083 string = XALLOCAVEC (char, len + 1);
8f90be4c
NC
3084
3085 sprintf (string, "%s%s", prefix, name);
3086
3087 DECL_SECTION_NAME (decl) = build_string (len, string);
3088}
3089
3090int
08903e08 3091mcore_naked_function_p (void)
8f90be4c 3092{
91d231cb 3093 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3094}
7c262518 3095
d45eae79
SL
3096static bool
3097mcore_warn_func_return (tree decl)
3098{
3099 /* Naked functions are implemented entirely in assembly, including the
3100 return sequence, so suppress warnings about this. */
3101 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3102}
3103
#ifdef OBJECT_FORMAT_ELF
/* Emit a .section directive naming NAME; flags and decl are ignored.  */

static void
mcore_asm_named_section (const char *name,
			 unsigned int flags ATTRIBUTE_UNUSED,
			 tree decl ATTRIBUTE_UNUSED)
{
  fputs ("\t.section ", asm_out_file);
  fputs (name, asm_out_file);
  putc ('\n', asm_out_file);
}
#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3113
dc7efe6e
KH
3114/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3115
09a2b93a
KH
3116static void
3117mcore_external_libcall (rtx fun)
3118{
3119 fprintf (asm_out_file, "\t.import\t");
3120 assemble_name (asm_out_file, XSTR (fun, 0));
3121 fprintf (asm_out_file, "\n");
3122}
3123
dc7efe6e
KH
3124/* Worker function for TARGET_RETURN_IN_MEMORY. */
3125
09a2b93a 3126static bool
586de218 3127mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3128{
586de218 3129 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3130 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3131}
71e0af3c
RH
3132
3133/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3134 Output assembler code for a block containing the constant parts
3135 of a trampoline, leaving space for the variable parts.
3136
3137 On the MCore, the trampoline looks like:
3138 lrw r1, function
3139 lrw r13, area
3140 jmp r13
3141 or r0, r0
3142 .literals */
3143
static void
mcore_asm_trampoline_template (FILE *f)
{
  /* Four instruction halfwords followed by two zeroed literal slots;
     mcore_trampoline_init later fills the slots with the static chain
     and the target function address.  */
  static const char * const lines[] =
    {
      "\t.short 0x7102\n",
      "\t.short 0x7d02\n",
      "\t.short 0x00cd\n",
      "\t.short 0x1e00\n",
      "\t.long 0\n",
      "\t.long 0\n"
    };
  size_t i;

  for (i = 0; i < sizeof lines / sizeof lines[0]; i++)
    fputs (lines[i], f);
}
3154
3155/* Worker function for TARGET_TRAMPOLINE_INIT. */
3156
3157static void
3158mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3159{
3160 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3161 rtx mem;
3162
3163 emit_block_move (m_tramp, assemble_trampoline_template (),
3164 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3165
3166 mem = adjust_address (m_tramp, SImode, 8);
3167 emit_move_insn (mem, chain_value);
3168 mem = adjust_address (m_tramp, SImode, 12);
3169 emit_move_insn (mem, fnaddr);
3170}
1a627b35
RS
3171
3172/* Implement TARGET_LEGITIMATE_CONSTANT_P
3173
3174 On the MCore, allow anything but a double. */
3175
3176static bool
3177mcore_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3178{
3179 return GET_CODE (x) != CONST_DOUBLE;
3180}