/* Output routines for Motorola MCore processor
   Copyright (C) 1993-2019 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "mcore.h"
#include "output.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "builtins.h"
#include "regs.h"

/* This file should be included last.  */
#include "target-def.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, GENERAL_REGS,
  GENERAL_REGS, C_REGS,      NO_REGS,  NO_REGS,
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};
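
/* A note on GROWTH: layout_mcore_frame () fills growth[] with the byte
   counts of the successive stack adjustments the prologue is expected
   to make, and reg_growth/local_growth record which of those steps the
   register saves and the locals sit behind.  For instance (an
   illustrative layout, not output from a real compilation),
   growth[] = { 24, 16 } asks for two adjustments of 24 and then 16
   bytes.  */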

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void output_stack_adjust (int, int);
static int calc_live_regs (int *);
static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char * output_inline_const (machine_mode, rtx *);
static void layout_mcore_frame (struct mcore_frame *);
static void mcore_setup_incoming_varargs (cumulative_args_t,
					  const function_arg_info &,
					  int *, int);
static cond_type is_cond_candidate (rtx);
static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
static rtx_insn *conditionalize_block (rtx_insn *);
static void conditionalize_optimization (void);
static void mcore_reorg (void);
static rtx handle_structs_in_regs (machine_mode, const_tree, int);
static void mcore_mark_dllexport (tree);
static void mcore_mark_dllimport (tree);
static int mcore_dllexport_p (tree);
static int mcore_dllimport_p (tree);
static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void mcore_asm_named_section (const char *,
				     unsigned int, tree);
#endif
static void mcore_print_operand (FILE *, rtx, int);
static void mcore_print_operand_address (FILE *, machine_mode, rtx);
static bool mcore_print_operand_punct_valid_p (unsigned char code);
static void mcore_unique_section (tree, int);
static void mcore_encode_section_info (tree, rtx, int);
static const char *mcore_strip_name_encoding (const char *);
static int mcore_const_costs (rtx, RTX_CODE);
static int mcore_and_cost (rtx);
static int mcore_ior_cost (rtx);
static bool mcore_rtx_costs (rtx, machine_mode, int, int,
			     int *, bool);
static void mcore_external_libcall (rtx);
static bool mcore_return_in_memory (const_tree, const_tree);
static int mcore_arg_partial_bytes (cumulative_args_t,
				    const function_arg_info &);
static rtx mcore_function_arg (cumulative_args_t,
			       machine_mode,
			       const_tree, bool);
static void mcore_function_arg_advance (cumulative_args_t,
					machine_mode,
					const_tree, bool);
static unsigned int mcore_function_arg_boundary (machine_mode,
						 const_tree);
static void mcore_asm_trampoline_template (FILE *);
static void mcore_trampoline_init (rtx, tree, rtx);
static bool mcore_warn_func_return (tree);
static void mcore_option_override (void);
static bool mcore_legitimate_constant_p (machine_mode, rtx);
static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
					addr_space_t);
static bool mcore_hard_regno_mode_ok (unsigned int, machine_mode);
static bool mcore_modes_tieable_p (machine_mode, machine_mode);
\f
/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
  { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
  { "naked", 0, 0, true, false, false, false,
    mcore_handle_naked_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
#undef  TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p

#undef  TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef  TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return

#undef  TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok

#undef  TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p

#undef  TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef  TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Adjust the stack: grow it by SIZE bytes when DIRECTION < 0, shrink
   it when DIRECTION > 0.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
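  /* A note on the loop below: each pass moves the stack pointer down
     mcore_stack_increment bytes and then does a volatile store at the
     new stack pointer, touching each increment as the stack grows (a
     stack probe); only the residual adjustment after the loop needs no
     probe, as the comment at the end of the loop records.  */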
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

static void
mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO(base)], INTVAL (index));
            break;

          default:
            gcc_unreachable ();
          }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
          || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */
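
/* For example, a CONST_INT operand of 8 prints as 3 under 'P'
   (exact_log2 (8)); 7 prints as 3 under 'N' (exact_log2 (7 + 1)); and
   -1 prints as 32 under 'N'.  These are the forms the bgeni/bmaski
   templates elsewhere in this file rely on.  */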

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          gcc_unreachable ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (GET_MODE (x), XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that CSE will CSE relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same const).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an ior cost - see mcore_and_cost ().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
                 int opno ATTRIBUTE_UNUSED,
                 int * total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
        {
        case GTU:
          /* Unsigned > 0 is the same as != 0; everything else is converted
             below to LEU (reversed cmphs).  */
          if (val == 0)
            code = NE;
          break;

        /* Check whether (LE A imm) can become (LT A imm + 1),
           or (GT A imm) can become (GE A imm + 1).  */
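        /* For example, (le r2 (const_int 31)) can become
           (lt r2 (const_int 32)); 32 is still a valid cmplti immediate
           (the J constraint -- see the note further down).  */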
        case GT:
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              op1 = GEN_INT (val + 1);
              code = code == LE ? LT : GE;
            }
          break;

        default:
          break;
        }
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* FALLTHRU */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* FALLTHRU */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* FALLTHRU */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
          /* covered by btsti x,31.  */
          INTVAL (op1) != 0 &&
          ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* FALLTHRU */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* FALLTHRU */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);
          gcc_assert (GET_CODE (addr) == SYMBOL_REF);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number on
   success (0 means not inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
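
/* Two worked examples, checked against the cases below: 0xffffff80
   (-128) is handled by trick 2, since ~0xffffff80 == 127 is directly
   loadable (movi 127 then not); 159 is handled by trick 3, since
   159 - 31 == 128 is a power of two (bgeni 7, i.e. 128, then
   addi 31).  */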

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
        {
          *x = value - i;
          *y = i;

          return 3;
        }

      if (const_ok_for_mcore (value + i))
        {
          *x = value + i;
          *y = i;

          return 4;
        }
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
        {
          *x = i - value;
          *y = i;

          return 5;
        }

      if (const_ok_for_mcore (value & ~bit))
        {
          *y = bit;
          *x = value & ~bit;
          return 6;
        }

      if (const_ok_for_mcore (value | bit))
        {
          *y = ~bit;
          *x = value | bit;

          return 7;
        }

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        {
          *y = i;
          *x = rot;

          return 8;
        }

      if (shf & 1)
        shf = 0;		/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
        {
          *y = i;
          *x = shf;

          return 9;
        }
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
        return 0;	/* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (NONJUMP_INSN_P (insn))
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
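  /* Worked example for the lines below, with mask == 0xff: the first
     line turns it into 0xaa (each 2-bit field holds the count 2 for
     its bit pair), the second into 0x44 (each 4-bit field holds 4),
     the third into 0x08 (the byte field holds 8), and the final folds
     return 8.  */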
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */
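/* For example, MASK == 0x11 (bits 0 and 4 set) emits
       bseti dst,0
       bseti dst,4
   and leaves the other bits of DST untouched.  */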

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
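
/* For example (derived from the cases below), for the constant
   0xffffff00 (-256) the inline trick loads x == ~0xffffff00 == 0xff
   with "bmaski %0,8" (an 8-bit mask of ones), and the following
   "andn %2,%0" computes %2 & ~0xff, i.e. the original and with
   0xffffff00.  */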

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks ().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

1276
1277/* Output a move of a word or less value. */
4816b8e4 1278
f27cd94d 1279const char *
08903e08 1280mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
ef4bddc2 1281 machine_mode mode ATTRIBUTE_UNUSED)
8f90be4c
NC
1282{
1283 rtx dst = operands[0];
1284 rtx src = operands[1];
1285
1286 if (GET_CODE (dst) == REG)
1287 {
1288 if (GET_CODE (src) == REG)
1289 {
1290 if (REGNO (src) == CC_REG) /* r-c */
1291 return "mvc\t%0";
1292 else
1293 return "mov\t%0,%1"; /* r-r*/
1294 }
1295 else if (GET_CODE (src) == MEM)
1296 {
1297 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1298 return "lrw\t%0,[%1]"; /* a-R */
1299 else
f0f4da32
RS
1300 switch (GET_MODE (src)) /* r-m */
1301 {
4e10a5a7 1302 case E_SImode:
f0f4da32 1303 return "ldw\t%0,%1";
4e10a5a7 1304 case E_HImode:
f0f4da32 1305 return "ld.h\t%0,%1";
4e10a5a7 1306 case E_QImode:
f0f4da32
RS
1307 return "ld.b\t%0,%1";
1308 default:
6e1f65b5 1309 gcc_unreachable ();
f0f4da32 1310 }
8f90be4c
NC
1311 }
1312 else if (GET_CODE (src) == CONST_INT)
1313 {
6e3a343d 1314 HOST_WIDE_INT x, y;
8f90be4c
NC
1315
1316 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1317 return "movi\t%0,%1";
1318 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1319 return "bgeni\t%0,%P1\t// %1 %x1";
1320 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1321 return "bmaski\t%0,%N1\t// %1 %x1";
1322 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1323 return output_inline_const (SImode, operands); /* 1-2 insns */
1324 else
4816b8e4 1325 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
8f90be4c
NC
1326 }
1327 else
4816b8e4 1328 return "lrw\t%0, %1"; /* Into the literal pool. */
8f90be4c
NC
1329 }
1330 else if (GET_CODE (dst) == MEM) /* m-r */
f0f4da32
RS
1331 switch (GET_MODE (dst))
1332 {
4e10a5a7 1333 case E_SImode:
f0f4da32 1334 return "stw\t%1,%0";
4e10a5a7 1335 case E_HImode:
f0f4da32 1336 return "st.h\t%1,%0";
4e10a5a7 1337 case E_QImode:
f0f4da32
RS
1338 return "st.b\t%1,%0";
1339 default:
6e1f65b5 1340 gcc_unreachable ();
f0f4da32 1341 }
8f90be4c 1342
6e1f65b5 1343 gcc_unreachable ();
8f90be4c
NC
1344}
1345
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
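
/* For example, copying the register pair r2/r3 to r3/r4: moving r2
   into r3 first would clobber the second source word, so the REG/REG
   case below (srcreg + 1 == dstreg) emits "mov r4,r3" before
   "mov r3,r2"; in the non-overlapping direction the low word moves
   first.  */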

const char *
mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov %0,%1";
          else
            return "mov %0,%1\n\tmov %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %R0,32";
              else
                return "movi %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %0,32";
              else
                return "movi %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (operands[0],
                                  gen_rtx_AND (SImode, operands[0],
                                               GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (operands[0],
                                  gen_rtx_IOR (SImode, operands[0],
                                               GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */
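  /* A concrete instance of the sequence below: for width == 4 and
     posn == 8, the clear mask register holds ~(0xf << 8) == 0xfffff0ff,
     the source is masked with 0xf, shifted left by 8, and IORed into
     operands[0].  */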

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (i.e., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
                                                  GEN_INT (posn))));

  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
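
/* For instance, a 7-byte copy with 4-byte alignment comes out of
   block_move_sequence as
       ldw  t0,(src,0)
       ld.h t1,(src,4)
       stw  t0,(dst,0)
       ld.b t0,(src,6)
       st.h t1,(dst,4)
       st.b t0,(dst,6)
   with the two temporaries used in alternation (a hand trace of the
   loop below, not captured compiler output).  */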

static const machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
\f

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
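  /* The three early-out cases below implement that: a frame small
     enough for a single addi/subi is bought in one step; a frame
     reachable by word stores splits the purchase so both adjustments
     stay single instructions; and an aligned register+arg block is
     bought first on its own.  The fall-through encoding at the bottom
     is the safe, unoptimized path.  */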
b6a1cbae 1749 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
8f90be4c
NC
1750 infp->growth[i] = 0;
1751
1752 regarg = infp->reg_size + infp->arg_size;
1753 localregarg = infp->local_size + regarg;
8f90be4c
NC
1754 outbounds = infp->outbound_size + infp->pad_outbound;
1755 growths = 0;
1756
1757 /* XXX: Consider one where we consider localregarg + outbound too! */
1758
1759 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
1760 use stw's with offsets and buy the frame in one shot. */
1761 if (localregarg <= ADDI_REACH
1762 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1763 {
1764 /* Make sure we'll be aligned. */
1765 if (localregarg % STACK_BYTES)
1766 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1767
1768 step = localregarg + infp->pad_reg;
1769 infp->reg_offset = infp->local_size;
1770
1771 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1772 {
1773 step += outbounds;
1774 infp->reg_offset += outbounds;
1775 outbounds = 0;
1776 }
1777
1778 infp->arg_offset = step - 4;
1779 infp->growth[growths++] = step;
1780 infp->reg_growth = growths;
1781 infp->local_growth = growths;
1782
4816b8e4 1783 /* If we haven't already folded it in. */
8f90be4c
NC
1784 if (outbounds)
1785 infp->growth[growths++] = outbounds;
1786
1787 goto finish;
1788 }
1789
1790 /* Frame can't be done with a single subi, but can be done with 2
1791 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1792 shift some of the stack purchase into the first subi, so both are
1793 single instructions. */
1794 if (localregarg <= STORE_REACH
1795 && (infp->local_size > ADDI_REACH)
1796 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1797 {
1798 int all;
1799
1800 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1801 if (localregarg % STACK_BYTES)
1802 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1803
1804 all = localregarg + infp->pad_reg + infp->pad_local;
1805 step = ADDI_REACH; /* As much up front as we can. */
1806 if (step > all)
1807 step = all;
1808
1809 /* XXX: Consider whether step will still be aligned; we believe so. */
1810 infp->arg_offset = step - 4;
1811 infp->growth[growths++] = step;
1812 infp->reg_growth = growths;
1813 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1814 all -= step;
1815
4816b8e4 1816 /* Can we fold in any space required for outbounds? */
8f90be4c
NC
1817 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1818 {
1819 all += outbounds;
1820 outbounds = 0;
1821 }
1822
4816b8e4 1823 /* Get the rest of the locals in place. */
8f90be4c
NC
1824 step = all;
1825 infp->growth[growths++] = step;
1826 infp->local_growth = growths;
1827 all -= step;
1828
819bfe0e 1829 gcc_assert (all == 0);
8f90be4c 1830
4816b8e4 1831 /* Finish off if we need to do so. */
8f90be4c
NC
1832 if (outbounds)
1833 infp->growth[growths++] = outbounds;
1834
1835 goto finish;
1836 }
1837
1838 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1839 Then we buy the rest of the frame in 1 or 2 steps depending on
1840 whether we need a frame pointer. */
1841 if ((regarg % STACK_BYTES) == 0)
1842 {
1843 infp->growth[growths++] = regarg;
1844 infp->reg_growth = growths;
1845 infp->arg_offset = regarg - 4;
1846 infp->reg_offset = 0;
1847
1848 if (infp->local_size % STACK_BYTES)
1849 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1850
1851 step = infp->local_size + infp->pad_local;
1852
1853 if (!frame_pointer_needed)
1854 {
1855 step += outbounds;
1856 outbounds = 0;
1857 }
1858
1859 infp->growth[growths++] = step;
1860 infp->local_growth = growths;
1861
4816b8e4 1862 /* If there's any left to be done. */
8f90be4c
NC
1863 if (outbounds)
1864 infp->growth[growths++] = outbounds;
1865
1866 goto finish;
1867 }
1868
1869 /* XXX: optimizations that we'll want to play with....
4816b8e4
NC
1870 -- regarg is not aligned, but it's a small number of registers;
1871 use some of localsize so that regarg is aligned and then
1872 save the registers. */
8f90be4c
NC
1873
1874 /* Simple encoding; plods down the stack buying the pieces as it goes.
4816b8e4
NC
1875 -- does not optimize space consumption.
1876 -- does not attempt to optimize instruction counts.
1877 -- but it is safe for all alignments. */
8f90be4c
NC
1878 if (regarg % STACK_BYTES != 0)
1879 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1880
1881 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1882 infp->reg_growth = growths;
1883 infp->arg_offset = infp->growth[0] - 4;
1884 infp->reg_offset = 0;
1885
1886 if (frame_pointer_needed)
1887 {
1888 if (infp->local_size % STACK_BYTES != 0)
1889 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1890
1891 infp->growth[growths++] = infp->local_size + infp->pad_local;
1892 infp->local_growth = growths;
1893
1894 infp->growth[growths++] = outbounds;
1895 }
1896 else
1897 {
1898 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1899 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1900
1901 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1902 infp->local_growth = growths;
1903 }
1904
f27cd94d 1905 /* Anything else that we've forgotten?, plus a few consistency checks. */
8f90be4c 1906 finish:
819bfe0e
JM
1907 gcc_assert (infp->reg_offset >= 0);
1908 gcc_assert (growths <= MAX_STACK_GROWS);
8f90be4c
NC
1909
1910 for (i = 0; i < growths; i++)
6e1f65b5 1911 gcc_assert (!(infp->growth[i] % STACK_BYTES));
8f90be4c
NC
1912}
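/* A worked example of the first strategy above (purely illustrative,
   assuming STACK_BYTES == 4 and ADDI_REACH == 32 to match the "frame
   of <= 32 bytes" comment): with local_size == 20, reg_size == 8
   (reg_mask == 0xc000) and no stdarg or outbound space, regarg == 8
   and localregarg == 28 <= ADDI_REACH, so the whole frame is bought
   with a single subi of 28; reg_offset becomes 20 (the register saves
   sit just above the locals) and arg_offset becomes 24.  */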
1913
1914/* Define the offset between two registers, one to be eliminated, and
1915 the other its replacement, at the start of a routine. */
4816b8e4 1916
8f90be4c 1917int
08903e08 1918mcore_initial_elimination_offset (int from, int to)
8f90be4c
NC
1919{
1920 int above_frame;
1921 int below_frame;
1922 struct mcore_frame fi;
1923
1924 layout_mcore_frame (& fi);
1925
1926 /* fp to ap */
1927 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1928 /* sp to fp */
1929 below_frame = fi.outbound_size + fi.pad_outbound;
1930
1931 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1932 return above_frame;
1933
1934 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1935 return above_frame + below_frame;
1936
1937 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1938 return below_frame;
1939
6e1f65b5 1940 gcc_unreachable ();
8f90be4c
NC
1941}
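/* Rough picture of the frame implied by the offsets above (high
   addresses at the top):

        ap ->  [ register saves + pad_reg  ]
               [ locals + pad_local        ]
        fp ->  [ outbounds + pad_outbound  ]
        sp ->

   so that ap - fp == above_frame and fp - sp == below_frame.  */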
1942
4816b8e4
NC
1943 /* Keep track of some information about varargs for the prologue. */
1944
09a2b93a 1945static void
d5cc9181 1946mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
e7056ca4 1947 const function_arg_info &arg,
09a2b93a
KH
1948 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1949 int second_time ATTRIBUTE_UNUSED)
8f90be4c 1950{
d5cc9181
JR
1951 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1952
8f90be4c
NC
1953 current_function_anonymous_args = 1;
1954
1955 /* We need to know how many argument registers are used before
1956 the varargs start, so that we can push the remaining argument
1957 registers during the prologue. */
e7056ca4
RS
1958 number_of_regs_before_varargs
1959 = *args_so_far + mcore_num_arg_regs (arg.mode, arg.type);
8f90be4c 1960
dab66575 1961 /* There is a bug somewhere in the arg handling code.
8f90be4c
NC
1962 Until I can find it, this workaround always pushes the
1963 last named argument onto the stack. */
09a2b93a 1964 number_of_regs_before_varargs = *args_so_far;
8f90be4c
NC
1965
1966 /* The last named argument may be split between argument registers
1967 and the stack. Allow for this here. */
1968 if (number_of_regs_before_varargs > NPARM_REGS)
1969 number_of_regs_before_varargs = NPARM_REGS;
1970}
1971
1972void
08903e08 1973mcore_expand_prolog (void)
8f90be4c
NC
1974{
1975 struct mcore_frame fi;
1976 int space_allocated = 0;
1977 int growth = 0;
1978
1979 /* Find out what we're doing. */
1980 layout_mcore_frame (&fi);
1981
1982 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1983 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1984
1985 if (TARGET_CG_DATA)
1986 {
1987 /* Emit a symbol for this routine's frame size. */
1988 rtx x;
8f90be4c
NC
1989
1990 x = DECL_RTL (current_function_decl);
1991
6e1f65b5 1992 gcc_assert (GET_CODE (x) == MEM);
8f90be4c
NC
1993
1994 x = XEXP (x, 0);
1995
6e1f65b5 1996 gcc_assert (GET_CODE (x) == SYMBOL_REF);
8f90be4c 1997
04695783 1998 free (mcore_current_function_name);
8f90be4c 1999
1dcd444b 2000 mcore_current_function_name = xstrdup (XSTR (x, 0));
8f90be4c
NC
2001
2002 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2003
e3b5732b 2004 if (cfun->calls_alloca)
8f90be4c
NC
2005 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2006
2007 /* 970425: RBE:
2008 We're looking at how the 8-byte alignment affects stack layout
2009 and where we had to pad things. This emits information we can
2010 extract which tells us about frame sizes and the like. */
2011 fprintf (asm_out_file,
2012 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2013 mcore_current_function_name,
2014 fi.arg_size, fi.reg_size, fi.reg_mask,
2015 fi.local_size, fi.outbound_size,
2016 frame_pointer_needed);
2017 }
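/* With the illustrative frame from the layout example earlier, that
   last fprintf would emit something like

       .equ __$frame$info$_foo_$_0_8_xc000_20_0_0,0

   for a function named foo (all values hypothetical).  */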
2018
2019 if (mcore_naked_function_p ())
2020 return;
2021
2022 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
08903e08 2023 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
8f90be4c
NC
2024
2025 /* If we have a parameter passed partially in regs and partially in memory,
2026 the registers will have been stored to memory already in function.c. So
2027 we only need to do something here for varargs functions. */
38173d38 2028 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
8f90be4c
NC
2029 {
2030 int offset;
2031 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2032 int remaining = fi.arg_size;
2033
2034 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2035 {
2036 emit_insn (gen_movsi
f1c25d3b 2037 (gen_rtx_MEM (SImode,
0a81f074
RS
2038 plus_constant (Pmode, stack_pointer_rtx,
2039 offset)),
f1c25d3b 2040 gen_rtx_REG (SImode, rn)));
8f90be4c
NC
2041 }
2042 }
2043
4816b8e4 2044 /* Do we need another stack adjustment before we do the register saves? */
8f90be4c 2045 if (growth < fi.reg_growth)
08903e08 2046 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
8f90be4c
NC
2047
2048 if (fi.reg_size != 0)
2049 {
2050 int i;
2051 int offs = fi.reg_offset;
2052
2053 for (i = 15; i >= 0; i--)
2054 {
2055 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2056 {
2057 int first_reg = 15;
2058
2059 while (fi.reg_mask & (1 << first_reg))
2060 first_reg--;
2061 first_reg++;
2062
f1c25d3b
KH
2063 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2064 gen_rtx_REG (SImode, first_reg),
8f90be4c
NC
2065 GEN_INT (16 - first_reg)));
2066
2067 i -= (15 - first_reg);
2068 offs += (16 - first_reg) * 4;
2069 }
2070 else if (fi.reg_mask & (1 << i))
2071 {
2072 emit_insn (gen_movsi
f1c25d3b 2073 (gen_rtx_MEM (SImode,
0a81f074
RS
2074 plus_constant (Pmode, stack_pointer_rtx,
2075 offs)),
f1c25d3b 2076 gen_rtx_REG (SImode, i)));
8f90be4c
NC
2077 offs += 4;
2078 }
2079 }
2080 }
2081
2082 /* Figure the locals + outbounds. */
2083 if (frame_pointer_needed)
2084 {
2085 /* If we haven't already purchased to 'fp'. */
2086 if (growth < fi.local_growth)
08903e08 2087 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
8f90be4c
NC
2088
2089 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2090
4816b8e4 2091 /* ... and then go any remaining distance for outbounds, etc. */
8f90be4c
NC
2092 if (fi.growth[growth])
2093 output_stack_adjust (-1, fi.growth[growth++]);
2094 }
2095 else
2096 {
2097 if (growth < fi.local_growth)
08903e08 2098 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
8f90be4c
NC
2099 if (fi.growth[growth])
2100 output_stack_adjust (-1, fi.growth[growth++]);
2101 }
2102}
2103
2104void
08903e08 2105mcore_expand_epilog (void)
8f90be4c
NC
2106{
2107 struct mcore_frame fi;
2108 int i;
2109 int offs;
2110 int growth = MAX_STACK_GROWS - 1;
2111
f27cd94d 2112
8f90be4c
NC
2113 /* Find out what we're doing. */
2114 layout_mcore_frame (&fi);
2115
2116 if (mcore_naked_function_p ())
2117 return;
f27cd94d 2118
8f90be4c
NC
2119 /* If we had a frame pointer, restore the sp from that. */
2120 if (frame_pointer_needed)
2121 {
2122 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2123 growth = fi.local_growth - 1;
2124 }
2125 else
2126 {
2127 /* XXX: This while loop should accumulate and do a single sell. */
2128 while (growth >= fi.local_growth)
2129 {
2130 if (fi.growth[growth] != 0)
2131 output_stack_adjust (1, fi.growth[growth]);
2132 growth--;
2133 }
2134 }
2135
2136 /* Make sure we've shrunk the stack back to the point where the registers
2137 were laid down. This is typically 0/1 iterations. Then pull the
4816b8e4 2138 register save information back off the stack. */
8f90be4c
NC
2139 while (growth >= fi.reg_growth)
2140 output_stack_adjust (1, fi.growth[growth--]);
2141
2142 offs = fi.reg_offset;
2143
2144 for (i = 15; i >= 0; i--)
2145 {
2146 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2147 {
2148 int first_reg;
2149
2150 /* Find the starting register. */
2151 first_reg = 15;
2152
2153 while (fi.reg_mask & (1 << first_reg))
2154 first_reg--;
2155
2156 first_reg++;
2157
f1c25d3b
KH
2158 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2159 gen_rtx_MEM (SImode, stack_pointer_rtx),
8f90be4c
NC
2160 GEN_INT (16 - first_reg)));
2161
2162 i -= (15 - first_reg);
2163 offs += (16 - first_reg) * 4;
2164 }
2165 else if (fi.reg_mask & (1 << i))
2166 {
2167 emit_insn (gen_movsi
f1c25d3b
KH
2168 (gen_rtx_REG (SImode, i),
2169 gen_rtx_MEM (SImode,
0a81f074
RS
2170 plus_constant (Pmode, stack_pointer_rtx,
2171 offs))));
8f90be4c
NC
2172 offs += 4;
2173 }
2174 }
2175
2176 /* Give back anything else. */
dab66575 2177 /* XXX: Should accumulate total and then give it back. */
8f90be4c
NC
2178 while (growth >= 0)
2179 output_stack_adjust (1, fi.growth[growth--]);
2180}
2181\f
2182/* This code is borrowed from the SH port. */
2183
2184 /* The MCORE cannot load a large constant into a register; constants have to
2185 come from a pc relative load. The constant referenced by a pc relative
0fa2e4df 2186 load instruction must be less than 1k in front of the instruction. This
8f90be4c
NC
2187 means that we often have to dump a constant inside a function, and
2188 generate code to branch around it.
2189
2190 It is important to minimize this, since the branches will slow things
2191 down and make things bigger.
2192
2193 Worst case code looks like:
2194
2195 lrw L1,r0
2196 br L2
2197 align
2198 L1: .long value
2199 L2:
2200 ..
2201
2202 lrw L3,r0
2203 br L4
2204 align
2205 L3: .long value
2206 L4:
2207 ..
2208
2209 We fix this by performing a scan before scheduling, which notices which
2210 instructions need to have their operands fetched from the constant table
2211 and builds the table.
2212
2213 The algorithm is:
2214
2215 scan, find an instruction which needs a pcrel move. Look forward, find the
2216 last barrier which is within MAX_COUNT bytes of the requirement.
2217 If there isn't one, make one. Process all the instructions between
2218 the find and the barrier.
2219
2220 In the above example, we can tell that L3 is within 1k of L1, so
2221 the first move can be shrunk from the 2 insn+constant sequence into
2222 just 1 insn, and the constant moved to L3 to make:
2223
2224 lrw L1,r0
2225 ..
2226 lrw L3,r0
2227 br L4
2228 align
2229 L3:.long value
2230 L4:.long value
2231
2232 Then the second move becomes the target for the shortening process. */
2233
2234typedef struct
2235{
2236 rtx value; /* Value in table. */
2237 rtx label; /* Label of value. */
2238} pool_node;
2239
2240/* The maximum number of constants that can fit into one pool, since
2241 the pc relative range is 0...1020 bytes and constants are at least 4
2a43945f 2242 bytes long. We subtract 4 from the range to allow for the case where
8f90be4c
NC
2243 we need to add a branch/align before the constant pool. */
2244
2245#define MAX_COUNT 1016
2246#define MAX_POOL_SIZE (MAX_COUNT/4)
2247static pool_node pool_vector[MAX_POOL_SIZE];
2248static int pool_size;
2249
2250/* Dump out any constants accumulated in the final pass. These
2251 will only be labels. */
4816b8e4 2252
f27cd94d 2253const char *
08903e08 2254mcore_output_jump_label_table (void)
8f90be4c
NC
2255{
2256 int i;
2257
2258 if (pool_size)
2259 {
2260 fprintf (asm_out_file, "\t.align 2\n");
2261
2262 for (i = 0; i < pool_size; i++)
2263 {
2264 pool_node * p = pool_vector + i;
2265
4977bab6 2266 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
8f90be4c
NC
2267
2268 output_asm_insn (".long %0", &p->value);
2269 }
2270
2271 pool_size = 0;
2272 }
2273
2274 return "";
2275}
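/* The output for a single-entry pool looks something like this (label
   numbers are hypothetical):

       .align 2
   .L23:
       .long .L17

   i.e. an aligned table with one .long per pool_node.  */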
2276
8f90be4c 2277/* Check whether insn is a candidate for a conditional. */
4816b8e4 2278
8f90be4c 2279static cond_type
08903e08 2280is_cond_candidate (rtx insn)
8f90be4c
NC
2281{
2282 /* The only things we conditionalize are those that can be directly
2283 changed into a conditional. Only bother with SImode items. If
2284 we wanted to be a little more aggressive, we could also do other
4816b8e4 2285 modes such as DImode with reg-reg move or load 0. */
b64925dc 2286 if (NONJUMP_INSN_P (insn))
8f90be4c
NC
2287 {
2288 rtx pat = PATTERN (insn);
2289 rtx src, dst;
2290
2291 if (GET_CODE (pat) != SET)
2292 return COND_NO;
2293
2294 dst = XEXP (pat, 0);
2295
2296 if ((GET_CODE (dst) != REG &&
2297 GET_CODE (dst) != SUBREG) ||
2298 GET_MODE (dst) != SImode)
2299 return COND_NO;
2300
2301 src = XEXP (pat, 1);
2302
2303 if ((GET_CODE (src) == REG ||
2304 (GET_CODE (src) == SUBREG &&
2305 GET_CODE (SUBREG_REG (src)) == REG)) &&
2306 GET_MODE (src) == SImode)
2307 return COND_MOV_INSN;
2308 else if (GET_CODE (src) == CONST_INT &&
2309 INTVAL (src) == 0)
2310 return COND_CLR_INSN;
2311 else if (GET_CODE (src) == PLUS &&
2312 (GET_CODE (XEXP (src, 0)) == REG ||
2313 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2314 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2315 GET_MODE (XEXP (src, 0)) == SImode &&
2316 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2317 INTVAL (XEXP (src, 1)) == 1)
2318 return COND_INC_INSN;
2319 else if (((GET_CODE (src) == MINUS &&
2320 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2321 INTVAL( XEXP (src, 1)) == 1) ||
2322 (GET_CODE (src) == PLUS &&
2323 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2324 INTVAL (XEXP (src, 1)) == -1)) &&
2325 (GET_CODE (XEXP (src, 0)) == REG ||
2326 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2327 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2328 GET_MODE (XEXP (src, 0)) == SImode)
2329 return COND_DEC_INSN;
2330
14bc6742 2331 /* Some insns that we don't bother with:
8f90be4c
NC
2332 (set (rx:DI) (ry:DI))
2333 (set (rx:DI) (const_int 0))
2334 */
2335
2336 }
b64925dc
SB
2337 else if (JUMP_P (insn)
2338 && GET_CODE (PATTERN (insn)) == SET
2339 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
8f90be4c
NC
2340 return COND_BRANCH_INSN;
2341
2342 return COND_NO;
2343}
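/* For illustration, the SImode SETs accepted above look like this in
   RTL (register numbers are arbitrary):

   (set (reg:SI 2) (reg:SI 3))                           COND_MOV_INSN
   (set (reg:SI 2) (const_int 0))                        COND_CLR_INSN
   (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   COND_INC_INSN
   (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))  COND_DEC_INSN  */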
2344
2345/* Emit a conditional version of insn and replace the old insn with the
2346 new one. Return the new insn if emitted. */
4816b8e4 2347
b32d5189 2348static rtx_insn *
d8485bdb 2349emit_new_cond_insn (rtx_insn *insn, int cond)
8f90be4c
NC
2350{
2351 rtx c_insn = 0;
2352 rtx pat, dst, src;
2353 cond_type num;
2354
2355 if ((num = is_cond_candidate (insn)) == COND_NO)
2356 return NULL;
2357
2358 pat = PATTERN (insn);
2359
b64925dc 2360 if (NONJUMP_INSN_P (insn))
8f90be4c
NC
2361 {
2362 dst = SET_DEST (pat);
2363 src = SET_SRC (pat);
2364 }
2365 else
cd4c46f3
KG
2366 {
2367 dst = JUMP_LABEL (insn);
2368 src = NULL_RTX;
2369 }
8f90be4c
NC
2370
2371 switch (num)
2372 {
2373 case COND_MOV_INSN:
2374 case COND_CLR_INSN:
2375 if (cond)
2376 c_insn = gen_movt0 (dst, src, dst);
2377 else
2378 c_insn = gen_movt0 (dst, dst, src);
2379 break;
2380
2381 case COND_INC_INSN:
2382 if (cond)
2383 c_insn = gen_incscc (dst, dst);
2384 else
2385 c_insn = gen_incscc_false (dst, dst);
2386 break;
2387
2388 case COND_DEC_INSN:
2389 if (cond)
2390 c_insn = gen_decscc (dst, dst);
2391 else
2392 c_insn = gen_decscc_false (dst, dst);
2393 break;
2394
2395 case COND_BRANCH_INSN:
2396 if (cond)
2397 c_insn = gen_branch_true (dst);
2398 else
2399 c_insn = gen_branch_false (dst);
2400 break;
2401
2402 default:
2403 return NULL;
2404 }
2405
2406 /* Only copy the notes if they exist. */
2407 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2408 {
2409 /* We really don't need to bother with the notes and links at this
2410 point, but go ahead and save the notes. This will help is_dead()
2411 when applying peepholes (links don't matter since they are not
2412 used any more beyond this point for the mcore). */
2413 REG_NOTES (c_insn) = REG_NOTES (insn);
2414 }
2415
2416 if (num == COND_BRANCH_INSN)
2417 {
2418 /* For jumps, we need to be a little bit careful and emit the new jump
2419 before the old one and to update the use count for the target label.
2420 This way, the barrier following the old (uncond) jump will get
2421 deleted, but the label won't. */
2422 c_insn = emit_jump_insn_before (c_insn, insn);
2423
2424 ++ LABEL_NUSES (dst);
2425
2426 JUMP_LABEL (c_insn) = dst;
2427 }
2428 else
2429 c_insn = emit_insn_after (c_insn, insn);
2430
2431 delete_insn (insn);
2432
b32d5189 2433 return as_a <rtx_insn *> (c_insn);
8f90be4c
NC
2434}
2435
2436/* Attempt to change a basic block into a series of conditional insns. This
2437 works by taking the branch at the end of the 1st block and scanning for the
2438 end of the 2nd block. If all instructions in the 2nd block have cond.
2439 versions and the label at the start of block 3 is the same as the target
2440 from the branch at block 1, then conditionalize all insns in block 2 using
2441 the inverse condition of the branch at block 1. (Note I'm bending the
2442 definition of basic block here.)
2443
2444 e.g., change:
2445
2446 bt L2 <-- end of block 1 (delete)
2447 mov r7,r8
2448 addu r7,1
2449 br L3 <-- end of block 2
2450
2451 L2: ... <-- start of block 3 (NUSES==1)
2452 L3: ...
2453
2454 to:
2455
2456 movf r7,r8
2457 incf r7
2458 bf L3
2459
2460 L3: ...
2461
2462 We can delete the L2 label if NUSES==1 and re-apply the optimization
2463 starting at the last instruction of block 2. This may allow an entire
4816b8e4 2464 if-then-else statement to be conditionalized. BRC */
b32d5189
DM
2465static rtx_insn *
2466conditionalize_block (rtx_insn *first)
8f90be4c 2467{
b32d5189 2468 rtx_insn *insn;
8f90be4c 2469 rtx br_pat;
b32d5189
DM
2470 rtx_insn *end_blk_1_br = 0;
2471 rtx_insn *end_blk_2_insn = 0;
2472 rtx_insn *start_blk_3_lab = 0;
8f90be4c
NC
2473 int cond;
2474 int br_lab_num;
2475 int blk_size = 0;
2476
2477
2478 /* Check that the first insn is a candidate conditional jump. This is
2479 the one that we'll eliminate. If not, advance to the next insn to
2480 try. */
b64925dc
SB
2481 if (! JUMP_P (first)
2482 || GET_CODE (PATTERN (first)) != SET
2483 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
8f90be4c
NC
2484 return NEXT_INSN (first);
2485
2486 /* Extract some information we need. */
2487 end_blk_1_br = first;
2488 br_pat = PATTERN (end_blk_1_br);
2489
2490 /* Complement the condition since we use the reverse cond. for the insns. */
2491 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2492
2493 /* Determine what kind of branch we have. */
2494 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2495 {
2496 /* A normal branch, so extract label out of first arm. */
2497 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2498 }
2499 else
2500 {
2501 /* An inverse branch, so extract the label out of the 2nd arm
2502 and complement the condition. */
2503 cond = (cond == 0);
2504 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2505 }
2506
2507 /* Scan forward for the start of block 2: it must start with a
2508 label and that label must be the same as the branch target
2509 label from block 1. We don't care about whether block 2 actually
2510 ends with a branch or a label (an uncond. branch is
2511 conditionalizable). */
2512 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2513 {
2514 enum rtx_code code;
2515
2516 code = GET_CODE (insn);
2517
14bc6742 2518 /* Look for the label at the start of block 3. */
8f90be4c
NC
2519 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2520 break;
2521
2522 /* Skip barriers, notes, and conditionalizable insns. If the
2523 insn is not conditionalizable or makes this optimization fail,
2524 just return the next insn so we can start over from that point. */
2525 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2526 return NEXT_INSN (insn);
2527
112cdef5 2528 /* Remember the last real insn before the label (i.e. end of block 2). */
8f90be4c
NC
2529 if (code == JUMP_INSN || code == INSN)
2530 {
2531 blk_size ++;
2532 end_blk_2_insn = insn;
2533 }
2534 }
2535
2536 if (!insn)
2537 return insn;
2538
2539 /* It is possible for this optimization to slow performance if the blocks
2540 are long. This really depends upon whether the branch is likely taken
2541 or not. If the branch is taken, we slow performance in many cases. But,
2542 if the branch is not taken, we always help performance (for a single
2543 block, but for a double block (i.e. when the optimization is re-applied)
2544 this is not true since the 'right thing' depends on the overall length of
2545 the collapsed block). As a compromise, don't apply this optimization on
2546 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2547 The best threshold depends on the latencies of the instructions (i.e.,
2548 the branch penalty). */
2549 if (optimize > 1 && blk_size > 2)
2550 return insn;
2551
2552 /* At this point, we've found the start of block 3 and we know that
2553 it is the destination of the branch from block 1. Also, all
2554 instructions in the block 2 are conditionalizable. So, apply the
2555 conditionalization and delete the branch. */
2556 start_blk_3_lab = insn;
2557
2558 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2559 insn = NEXT_INSN (insn))
2560 {
b32d5189 2561 rtx_insn *newinsn;
8f90be4c 2562
4654c0cf 2563 if (insn->deleted ())
8f90be4c
NC
2564 continue;
2565
14bc6742 2566 /* Try to form a conditional variant of the instruction and emit it. */
8f90be4c
NC
2567 if ((newinsn = emit_new_cond_insn (insn, cond)))
2568 {
2569 if (end_blk_2_insn == insn)
2570 end_blk_2_insn = newinsn;
2571
2572 insn = newinsn;
2573 }
2574 }
2575
2576 /* Note whether we will delete the label starting blk 3 when the jump
2577 gets deleted. If so, we want to re-apply this optimization at the
2578 last real instruction right before the label. */
2579 if (LABEL_NUSES (start_blk_3_lab) == 1)
2580 {
2581 start_blk_3_lab = 0;
2582 }
2583
2584 /* ??? We probably should redistribute the death notes for this insn, esp.
2585 the death of cc, but it doesn't really matter this late in the game.
2586 The peepholes all use is_dead() which will find the correct death
2587 regardless of whether there is a note. */
2588 delete_insn (end_blk_1_br);
2589
2590 if (! start_blk_3_lab)
2591 return end_blk_2_insn;
2592
4816b8e4 2593 /* Return the insn right after the label at the start of block 3. */
8f90be4c
NC
2594 return NEXT_INSN (start_blk_3_lab);
2595}
2596
2597/* Apply the conditionalization of blocks optimization. This is the
2598 outer loop that traverses the insns scanning for a branch
2599 that signifies an opportunity to apply the optimization. Note that
2600 this optimization is applied late. If we could apply it earlier,
2601 say before cse 2, it might expose more optimization opportunities.
2602 But the payback probably isn't really worth the effort (we'd have
2603 to update all reg/flow/notes/links/etc. to make it work, and stick it
4816b8e4
NC
2604 in before cse 2). */
2605
8f90be4c 2606static void
08903e08 2607conditionalize_optimization (void)
8f90be4c 2608{
b32d5189 2609 rtx_insn *insn;
8f90be4c 2610
18dbd950 2611 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
8f90be4c
NC
2612 continue;
2613}
2614
18dbd950 2615/* This is to handle loads from the constant pool. */
4816b8e4 2616
18dbd950 2617static void
08903e08 2618mcore_reorg (void)
8f90be4c
NC
2619{
2620 /* Reset this variable. */
2621 current_function_anonymous_args = 0;
2622
8f90be4c
NC
2623 if (optimize == 0)
2624 return;
2625
2626 /* Conditionalize blocks where we can. */
18dbd950 2627 conditionalize_optimization ();
8f90be4c
NC
2628
2629 /* Literal pool generation is now pushed off until the assembler. */
2630}
2631
2632\f
f0f4da32 2633/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2634
f0f4da32 2635bool
08903e08 2636mcore_r15_operand_p (rtx x)
f0f4da32
RS
2637{
2638 switch (GET_CODE (x))
2639 {
2640 case CONST_INT:
2641 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2642
f0f4da32
RS
2643 case REG:
2644 case SUBREG:
2645 case MEM:
2646 return 1;
2647
2648 default:
2649 return 0;
2650 }
2651}
2652
0a2aaacc 2653/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2654 directly move X into it, use r1-r14 as a temporary. */
08903e08 2655
f0f4da32 2656enum reg_class
0a2aaacc 2657mcore_secondary_reload_class (enum reg_class rclass,
ef4bddc2 2658 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2659{
0a2aaacc 2660 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
f0f4da32
RS
2661 && !mcore_r15_operand_p (x))
2662 return LRW_REGS;
2663 return NO_REGS;
2664}
8f90be4c 2665
f0f4da32 2666/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2667 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2668 use LRW_REGS instead. */
08903e08 2669
8f90be4c 2670enum reg_class
0a2aaacc 2671mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2672{
0a2aaacc 2673 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2674 return LRW_REGS;
8f90be4c 2675
0a2aaacc 2676 return rclass;
8f90be4c
NC
2677}
2678
2679/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2680 register. Note that the current version doesn't worry about whether
2681 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2682 in r2, which matches an SImode in r2). Might think in the future about whether
2683 we want to be able to say something about modes. */
08903e08 2684
8f90be4c 2685int
08903e08 2686mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2687{
14bc6742 2688 /* Strip any and all of the subreg wrappers. */
8f90be4c
NC
2689 while (GET_CODE (x) == SUBREG)
2690 x = SUBREG_REG (x);
2691
2692 while (GET_CODE (y) == SUBREG)
2693 y = SUBREG_REG (y);
2694
2695 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2696 return 1;
2697
2698 return 0;
2699}
2700
c5387660
JM
2701static void
2702mcore_option_override (void)
8f90be4c 2703{
8f90be4c
NC
2704 /* Only the m340 supports little endian code. */
2705 if (TARGET_LITTLE_END && ! TARGET_M340)
78fb8038 2706 target_flags |= MASK_M340;
8f90be4c 2707}
fac0f722 2708
8f90be4c 2709\f
8f90be4c
NC
2710/* Compute the number of word sized registers needed to
2711 hold a function argument of mode MODE and type TYPE. */
08903e08 2712
8f90be4c 2713int
ef4bddc2 2714mcore_num_arg_regs (machine_mode mode, const_tree type)
8f90be4c
NC
2715{
2716 int size;
2717
fe984136 2718 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2719 return 0;
2720
2721 if (type && mode == BLKmode)
2722 size = int_size_in_bytes (type);
2723 else
2724 size = GET_MODE_SIZE (mode);
2725
2726 return ROUND_ADVANCE (size);
2727}
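/* For example, assuming 4-byte words and that ROUND_ADVANCE rounds a
   byte count up to whole words: a 10-byte BLKmode structure needs
   ROUND_ADVANCE (10) == 3 argument registers, while an SImode scalar
   needs just one.  */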
2728
2729static rtx
ef4bddc2 2730handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
8f90be4c
NC
2731{
2732 int size;
2733
696e78bf 2734 /* The MCore ABI defines that a structure whose size is not a whole multiple
8f90be4c
NC
2735 of machine words is passed packed into registers (or spilled onto the stack if
2736 not enough registers are available) with the last few bytes of the
2737 structure being packed, left-justified, into the last register/stack slot.
2738 GCC handles this correctly if the last word is in a stack slot, but we
2739 have to generate a special PARALLEL RTX if the last word is in an
2740 argument register. */
2741 if (type
2742 && TYPE_MODE (type) == BLKmode
2743 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2744 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2745 && (size % UNITS_PER_WORD != 0)
2746 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2747 {
2748 rtx arg_regs [NPARM_REGS];
2749 int nregs;
2750 rtx result;
2751 rtvec rtvec;
2752
2753 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2754 {
2755 arg_regs [nregs] =
2756 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2757 GEN_INT (nregs * UNITS_PER_WORD));
2758 nregs ++;
2759 }
2760
2761 /* We assume here that NPARM_REGS == 6. The assert checks this. */
819bfe0e 2762 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
8f90be4c
NC
2763 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2764 arg_regs[3], arg_regs[4], arg_regs[5]);
2765
2766 result = gen_rtx_PARALLEL (mode, rtvec);
2767 return result;
2768 }
2769
2770 return gen_rtx_REG (mode, reg);
2771}
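/* Sketch of the PARALLEL built above: a 10-byte BLKmode structure
   starting in hard register 2 (and assuming 4-byte words) becomes
   roughly

   (parallel [(expr_list (reg:SI 2) (const_int 0))
              (expr_list (reg:SI 3) (const_int 4))
              (expr_list (reg:SI 4) (const_int 8))])

   with the final two bytes packed, left-justified, into the last
   register.  */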
2772
2773rtx
cde0f3fd 2774mcore_function_value (const_tree valtype, const_tree func)
8f90be4c 2775{
ef4bddc2 2776 machine_mode mode;
8f90be4c
NC
2777 int unsigned_p;
2778
2779 mode = TYPE_MODE (valtype);
2780
cde0f3fd 2781 /* Since we promote return types, we must promote the mode here too. */
71e0af3c 2782 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
8f90be4c
NC
2783
2784 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2785}
2786
2787/* Define where to put the arguments to a function.
2788 Value is zero to push the argument on the stack,
2789 or a hard register in which to store the argument.
2790
2791 MODE is the argument's machine mode.
2792 TYPE is the data type of the argument (as a tree).
2793 This is null for libcalls where that information may
2794 not be available.
2795 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2796 the preceding args and about the function being called.
2797 NAMED is nonzero if this argument is a named parameter
2798 (otherwise it is an extra parameter matching an ellipsis).
2799
2800 On MCore the first args are normally in registers
2801 and the rest are pushed. Any arg that starts within the first
2802 NPARM_REGS words is at least partially passed in a register unless
2803 its data type forbids. */
08903e08 2804
4665ac17 2805static rtx
ef4bddc2 2806mcore_function_arg (cumulative_args_t cum, machine_mode mode,
4665ac17 2807 const_tree type, bool named)
8f90be4c
NC
2808{
2809 int arg_reg;
2810
88042663 2811 if (! named || mode == VOIDmode)
8f90be4c
NC
2812 return 0;
2813
fe984136 2814 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2815 return 0;
2816
d5cc9181 2817 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
8f90be4c
NC
2818
2819 if (arg_reg < NPARM_REGS)
2820 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2821
2822 return 0;
2823}
2824
4665ac17 2825static void
ef4bddc2 2826mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
4665ac17
NF
2827 const_tree type, bool named ATTRIBUTE_UNUSED)
2828{
d5cc9181
JR
2829 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2830
4665ac17
NF
2831 *cum = (ROUND_REG (*cum, mode)
2832 + (int)named * mcore_num_arg_regs (mode, type));
2833}
2834
c2ed6cf8 2835static unsigned int
ef4bddc2 2836mcore_function_arg_boundary (machine_mode mode,
c2ed6cf8
NF
2837 const_tree type ATTRIBUTE_UNUSED)
2838{
2839 /* Doubles must be aligned to an 8 byte boundary. */
2840 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2841 ? BIGGEST_ALIGNMENT
2842 : PARM_BOUNDARY);
2843}
2844
78a52f11 2845/* Returns the number of bytes of argument registers required to hold *part*
a7c81bc1
RS
2846 of argument ARG. If the argument fits entirely in the argument registers,
2847 or entirely on the stack, then 0 is returned. CUM is the number of
2848 argument registers already used by earlier parameters to the function. */
08903e08 2849
78a52f11 2850static int
a7c81bc1 2851mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
8f90be4c 2852{
a7c81bc1 2853 int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
8f90be4c 2854
a7c81bc1 2855 if (!arg.named)
8f90be4c
NC
2856 return 0;
2857
a7c81bc1 2858 if (targetm.calls.must_pass_in_stack (arg.mode, arg.type))
8f90be4c
NC
2859 return 0;
2860
2861 /* REG is not the *hardware* register number of the register that holds
2862 the argument, it is the *argument* register number. So for example,
2863 the first argument to a function goes in argument register 0, which
2864 translates (for the MCore) into hardware register 2. The second
2865 argument goes into argument register 1, which translates into hardware
2866 register 3, and so on. NPARM_REGS is the number of argument registers
2867 supported by the target, not the maximum hardware register number of
2868 the target. */
2869 if (reg >= NPARM_REGS)
2870 return 0;
2871
2872 /* If the argument fits entirely in registers, return 0. */
a7c81bc1 2873 if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
8f90be4c
NC
2874 return 0;
2875
2876 /* The argument overflows the number of available argument registers.
2877 Compute how many argument registers have not yet been assigned to
2878 hold an argument. */
2879 reg = NPARM_REGS - reg;
2880
2881 /* Return partially in registers and partially on the stack. */
78a52f11 2882 return reg * UNITS_PER_WORD;
8f90be4c
NC
2883}
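/* A worked example: with NPARM_REGS == 6, an argument needing 3 words
   whose first word lands in argument register 4 has only argument
   registers 4 and 5 left, so this returns 2 * UNITS_PER_WORD bytes
   (8 on this target) and the remaining word goes on the stack.  */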
2884\f
a0ab749a 2885/* Return nonzero if SYMBOL is marked as being dllexport'd. */
08903e08 2886
8f90be4c 2887int
08903e08 2888mcore_dllexport_name_p (const char * symbol)
8f90be4c
NC
2889{
2890 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2891}
2892
a0ab749a 2893/* Return nonzero if SYMBOL is marked as being dllimport'd. */
08903e08 2894
8f90be4c 2895int
08903e08 2896mcore_dllimport_name_p (const char * symbol)
8f90be4c
NC
2897{
2898 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2899}
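/* For example, a symbol foo is renamed to "@e.foo" when dllexport'd
   and to "@i.__imp_foo" when dllimport'd by the marking routines
   below.  */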
2900
2901/* Mark a DECL as being dllexport'd. */
08903e08 2902
8f90be4c 2903static void
08903e08 2904mcore_mark_dllexport (tree decl)
8f90be4c 2905{
cbd3488b 2906 const char * oldname;
8f90be4c
NC
2907 char * newname;
2908 rtx rtlname;
2909 tree idp;
2910
2911 rtlname = XEXP (DECL_RTL (decl), 0);
2912
6e1f65b5
NS
2913 if (GET_CODE (rtlname) == MEM)
2914 rtlname = XEXP (rtlname, 0);
2915 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2916 oldname = XSTR (rtlname, 0);
8f90be4c
NC
2917
2918 if (mcore_dllexport_name_p (oldname))
2919 return; /* Already done. */
2920
5ead67f6 2921 newname = XALLOCAVEC (char, strlen (oldname) + 4);
8f90be4c
NC
2922 sprintf (newname, "@e.%s", oldname);
2923
2924 /* We pass newname through get_identifier to ensure it has a unique
2925 address. RTL processing can sometimes peek inside the symbol ref
2926 and compare the string's addresses to see if two symbols are
2927 identical. */
2928 /* ??? At least I think that's why we do this. */
2929 idp = get_identifier (newname);
2930
2931 XEXP (DECL_RTL (decl), 0) =
f1c25d3b 2932 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
8f90be4c
NC
2933}
2934
2935/* Mark a DECL as being dllimport'd. */
08903e08 2936
8f90be4c 2937static void
08903e08 2938mcore_mark_dllimport (tree decl)
8f90be4c 2939{
cbd3488b 2940 const char * oldname;
8f90be4c
NC
2941 char * newname;
2942 tree idp;
2943 rtx rtlname;
2944 rtx newrtl;
2945
2946 rtlname = XEXP (DECL_RTL (decl), 0);
2947
6e1f65b5
NS
2948 if (GET_CODE (rtlname) == MEM)
2949 rtlname = XEXP (rtlname, 0);
2950 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2951 oldname = XSTR (rtlname, 0);
8f90be4c 2952
6e1f65b5
NS
2953 gcc_assert (!mcore_dllexport_name_p (oldname));
2954 if (mcore_dllimport_name_p (oldname))
8f90be4c
NC
2955 return; /* Already done. */
2956
2957 /* ??? One can well ask why we're making these checks here,
2958 and that would be a good question. */
2959
2960 /* Imported variables can't be initialized. */
2961 if (TREE_CODE (decl) == VAR_DECL
2962 && !DECL_VIRTUAL_P (decl)
2963 && DECL_INITIAL (decl))
2964 {
dee15844 2965 error ("initialized variable %q+D is marked dllimport", decl);
8f90be4c
NC
2966 return;
2967 }
2968
2969 /* `extern' needn't be specified with dllimport.
2970 Specify `extern' now and hope for the best. Sigh. */
2971 if (TREE_CODE (decl) == VAR_DECL
2972 /* ??? Is this test for vtables needed? */
2973 && !DECL_VIRTUAL_P (decl))
2974 {
2975 DECL_EXTERNAL (decl) = 1;
2976 TREE_PUBLIC (decl) = 1;
2977 }
2978
5ead67f6 2979 newname = XALLOCAVEC (char, strlen (oldname) + 11);
8f90be4c
NC
2980 sprintf (newname, "@i.__imp_%s", oldname);
2981
2982 /* We pass newname through get_identifier to ensure it has a unique
2983 address. RTL processing can sometimes peek inside the symbol ref
2984 and compare the string's addresses to see if two symbols are
2985 identical. */
2986 /* ??? At least I think that's why we do this. */
2987 idp = get_identifier (newname);
2988
f1c25d3b
KH
2989 newrtl = gen_rtx_MEM (Pmode,
2990 gen_rtx_SYMBOL_REF (Pmode,
8f90be4c
NC
2991 IDENTIFIER_POINTER (idp)));
2992 XEXP (DECL_RTL (decl), 0) = newrtl;
2993}
2994
2995static int
08903e08 2996mcore_dllexport_p (tree decl)
8f90be4c
NC
2997{
2998 if ( TREE_CODE (decl) != VAR_DECL
2999 && TREE_CODE (decl) != FUNCTION_DECL)
3000 return 0;
3001
91d231cb 3002 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
3003}
3004
3005static int
08903e08 3006mcore_dllimport_p (tree decl)
8f90be4c
NC
3007{
3008 if ( TREE_CODE (decl) != VAR_DECL
3009 && TREE_CODE (decl) != FUNCTION_DECL)
3010 return 0;
3011
91d231cb 3012 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
3013}
3014
fb49053f 3015/* We must mark dll symbols specially. Definitions of dllexport'd objects
14bc6742 3016 install some info in the .drectve (PE) or .exports (ELF) sections. */
fb49053f
RH
3017
3018static void
08903e08 3019mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
8f90be4c 3020{
8f90be4c
NC
3021 /* Mark the decl so we can tell from the rtl whether the object is
3022 dllexport'd or dllimport'd. */
3023 if (mcore_dllexport_p (decl))
3024 mcore_mark_dllexport (decl);
3025 else if (mcore_dllimport_p (decl))
3026 mcore_mark_dllimport (decl);
3027
3028 /* It might be that DECL has already been marked as dllimport, but
3029 a subsequent definition nullified that. The attribute is gone
3030 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3031 else if ((TREE_CODE (decl) == FUNCTION_DECL
3032 || TREE_CODE (decl) == VAR_DECL)
3033 && DECL_RTL (decl) != NULL_RTX
3034 && GET_CODE (DECL_RTL (decl)) == MEM
3035 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3036 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3037 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3038 {
3cce094d 3039 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
8f90be4c 3040 tree idp = get_identifier (oldname + 9);
f1c25d3b 3041 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
8f90be4c
NC
3042
3043 XEXP (DECL_RTL (decl), 0) = newrtl;
3044
3045 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3046 ??? We leave these alone for now. */
3047 }
3048}
3049
772c5265
RH
3050/* Undo the effects of the above. */
3051
3052static const char *
08903e08 3053mcore_strip_name_encoding (const char * str)
772c5265
RH
3054{
3055 return str + (str[0] == '@' ? 3 : 0);
3056}
3057
8f90be4c
NC
3058/* MCore specific attribute support.
3059 dllexport - for exporting a function/variable that will live in a dll
3060 dllimport - for importing a function/variable from a dll
3061 naked - do not create a function prologue/epilogue. */
8f90be4c 3062
91d231cb
JM
3063/* Handle a "naked" attribute; arguments as in
3064 struct attribute_spec.handler. */
08903e08 3065
91d231cb 3066static tree
08903e08
SB
3067mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3068 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
91d231cb 3069{
d45eae79 3070 if (TREE_CODE (*node) != FUNCTION_DECL)
91d231cb 3071 {
29d08eba
JM
3072 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3073 name);
91d231cb 3074 *no_add_attrs = true;
8f90be4c
NC
3075 }
3076
91d231cb 3077 return NULL_TREE;
8f90be4c
NC
3078}
3079
ae46c4e0
RH
3080/* ??? It looks like this is PE specific? Oh well, this is what the
3081 old code did as well. */
8f90be4c 3082
ae46c4e0 3083static void
08903e08 3084mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
8f90be4c
NC
3085{
3086 int len;
0139adca 3087 const char * name;
8f90be4c 3088 char * string;
f27cd94d 3089 const char * prefix;
8f90be4c
NC
3090
3091 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3092
3093 /* Strip off any encoding in name. */
772c5265 3094 name = (* targetm.strip_name_encoding) (name);
8f90be4c
NC
3095
3096 /* The object is put in, for example, section .text$foo.
3097 The linker will then ultimately place it in .text
3098 (everything from the $ on is stripped). */
3099 if (TREE_CODE (decl) == FUNCTION_DECL)
3100 prefix = ".text$";
f710504c 3101 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3102 section might have relocs against it. */
4e4d733e 3103 else if (decl_readonly_section (decl, 0))
8f90be4c
NC
3104 prefix = ".rdata$";
3105 else
3106 prefix = ".data$";
3107
3108 len = strlen (name) + strlen (prefix);
5ead67f6 3109 string = XALLOCAVEC (char, len + 1);
8f90be4c
NC
3110
3111 sprintf (string, "%s%s", prefix, name);
3112
f961457f 3113 set_decl_section_name (decl, string);
8f90be4c
NC
3114}
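/* For instance, a function foo is placed in section ".text$foo" and a
   writable variable bar in ".data$bar"; the linker's $-stripping then
   folds these back into plain .text and .data.  */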
3115
3116int
08903e08 3117mcore_naked_function_p (void)
8f90be4c 3118{
91d231cb 3119 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3120}
7c262518 3121
d45eae79
SL
3122static bool
3123mcore_warn_func_return (tree decl)
3124{
3125 /* Naked functions are implemented entirely in assembly, including the
3126 return sequence, so suppress warnings about this. */
3127 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3128}
3129
ede75ee8 3130#ifdef OBJECT_FORMAT_ELF
7c262518 3131static void
c18a5b6c
MM
3132mcore_asm_named_section (const char *name,
3133 unsigned int flags ATTRIBUTE_UNUSED,
3134 tree decl ATTRIBUTE_UNUSED)
7c262518
RH
3135{
3136 fprintf (asm_out_file, "\t.section %s\n", name);
3137}
ede75ee8 3138#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3139
dc7efe6e
KH
3140/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3141
09a2b93a
KH
3142static void
3143mcore_external_libcall (rtx fun)
3144{
3145 fprintf (asm_out_file, "\t.import\t");
3146 assemble_name (asm_out_file, XSTR (fun, 0));
3147 fprintf (asm_out_file, "\n");
3148}
3149
dc7efe6e
KH
3150/* Worker function for TARGET_RETURN_IN_MEMORY. */
3151
09a2b93a 3152static bool
586de218 3153mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3154{
586de218 3155 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3156 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3157}
71e0af3c
RH
3158
3159/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3160 Output assembler code for a block containing the constant parts
3161 of a trampoline, leaving space for the variable parts.
3162
3163 On the MCore, the trampoline looks like:
3164 lrw r1, function
3165 lrw r13, area
3166 jmp r13
3167 or r0, r0
3168 .literals */
3169
3170static void
3171mcore_asm_trampoline_template (FILE *f)
3172{
3173 fprintf (f, "\t.short 0x7102\n");
3174 fprintf (f, "\t.short 0x7d02\n");
3175 fprintf (f, "\t.short 0x00cd\n");
3176 fprintf (f, "\t.short 0x1e00\n");
3177 fprintf (f, "\t.long 0\n");
3178 fprintf (f, "\t.long 0\n");
3179}
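/* My reading of how the template pairs up with the comment above (not
   separately verified against the ISA encoding): 0x7102 is the
   "lrw r1, function" slot, 0x7d02 the "lrw r13, area" slot, 0x00cd the
   "jmp r13", 0x1e00 the "or r0, r0" padding, and the two .long words
   are the literals that the lrw instructions fetch.  */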
3180
3181/* Worker function for TARGET_TRAMPOLINE_INIT. */
3182
3183static void
3184mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3185{
3186 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3187 rtx mem;
3188
3189 emit_block_move (m_tramp, assemble_trampoline_template (),
3190 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3191
3192 mem = adjust_address (m_tramp, SImode, 8);
3193 emit_move_insn (mem, chain_value);
3194 mem = adjust_address (m_tramp, SImode, 12);
3195 emit_move_insn (mem, fnaddr);
3196}
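/* The initialized trampoline therefore looks like this (byte offsets,
   assuming 4-byte words): 0..7 hold the code template copied above,
   offset 8 holds the static chain value and offset 12 the target
   function's address; these are exactly the two literal slots the
   template's lrw instructions load from.  */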
1a627b35
RS
3197
3198/* Implement TARGET_LEGITIMATE_CONSTANT_P
3199
3200 On the MCore, allow anything but a double. */
3201
3202static bool
ef4bddc2 3203mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1a627b35
RS
3204{
3205 return GET_CODE (x) != CONST_DOUBLE;
3206}
e7c6980e
AS
3207
3208/* Helper function for `mcore_legitimate_address_p'. */
3209
3210static bool
3211mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3212{
3213 if (strict_p)
3214 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3215 else
3216 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3217}
3218
3219static bool
3220mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3221{
3222 return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
3223}
3224
3225/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3226 and for DI is 0..56 because we use two SI loads, etc. */
3227
3228static bool
3229mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3230{
3231 if (CONST_INT_P (op))
3232 {
3233 if (GET_MODE_SIZE (mode) >= 4
3234 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3235 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3236 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3237 return true;
3238 if (GET_MODE_SIZE (mode) == 2
3239 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3240 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3241 return true;
3242 if (GET_MODE_SIZE (mode) == 1
3243 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3244 return true;
3245 }
3246 return false;
3247}
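/* Concretely: SImode accepts word-aligned offsets 0, 4, ..., 60;
   DImode stops at 56 because the second of its two SImode loads needs
   offset + 4 to stay within reach; HImode accepts even offsets up to
   30 and QImode any offset up to 15.  */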
3248
3249
3250/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3251
3252 Allow REG
3253 REG + disp */
3254
3255static bool
3256mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3257 addr_space_t as)
3258{
3259 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3260
3261 if (mcore_base_register_rtx_p (x, strict_p))
3262 return true;
3263 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3264 {
3265 rtx xop0 = XEXP (x, 0);
3266 rtx xop1 = XEXP (x, 1);
3267 if (mcore_base_register_rtx_p (xop0, strict_p)
3268 && mcore_legitimate_index_p (mode, xop1))
3269 return true;
3270 if (mcore_base_register_rtx_p (xop1, strict_p)
3271 && mcore_legitimate_index_p (mode, xop0))
3272 return true;
3273 }
3274
3275 return false;
3276}
3277
f939c3e6
RS
3278/* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
3279 even registers. */
3280
3281static bool
3282mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3283{
3284 if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
3285 return (regno & 1) == 0;
3286
3287 return regno < 18;
3288}
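/* E.g. with TARGET_8ALIGN in effect, a DImode value may start in r2
   (an even register) but not in r3; modes of word size or smaller may
   use any register below 18.  */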
99e1629f
RS
3289
3290/* Implement TARGET_MODES_TIEABLE_P. */
3291
3292static bool
3293mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3294{
3295 return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
3296}