/* Output routines for Motorola MCore processor
   Copyright (C) 1993-2019 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "mcore.h"
#include "output.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "builtins.h"
#include "regs.h"

/* This file should be included last.  */
#include "target-def.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        try_constant_tricks          (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *output_inline_const         (machine_mode, rtx *);
static void       layout_mcore_frame           (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
static cond_type  is_cond_candidate            (rtx);
static rtx_insn  *emit_new_cond_insn           (rtx_insn *, int);
static rtx_insn  *conditionalize_block         (rtx_insn *);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (machine_mode, const_tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *,
						unsigned int, tree);
#endif
static void       mcore_print_operand          (FILE *, rtx, int);
static void       mcore_print_operand_address  (FILE *, machine_mode, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char *mcore_strip_name_encoding   (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, machine_mode, int, int,
						int *, bool);
static void       mcore_external_libcall       (rtx);
static bool       mcore_return_in_memory       (const_tree, const_tree);
static int        mcore_arg_partial_bytes      (cumulative_args_t,
						machine_mode,
						tree, bool);
static rtx        mcore_function_arg           (cumulative_args_t,
						machine_mode,
						const_tree, bool);
static void       mcore_function_arg_advance   (cumulative_args_t,
						machine_mode,
						const_tree, bool);
static unsigned int mcore_function_arg_boundary (machine_mode,
						 const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init        (rtx, tree, rtx);
static bool       mcore_warn_func_return       (tree);
static void       mcore_option_override        (void);
static bool       mcore_legitimate_constant_p  (machine_mode, rtx);
static bool       mcore_legitimate_address_p   (machine_mode, rtx, bool,
						addr_space_t);
static bool       mcore_hard_regno_mode_ok     (unsigned int, machine_mode);
static bool       mcore_modes_tieable_p        (machine_mode, machine_mode);
\f
/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "dllexport", 0, 0, true,  false, false, false, NULL, NULL },
  { "dllimport", 0, 0, true,  false, false, false, NULL, NULL },
  { "naked",     0, 0, true,  false, false, false,
    mcore_handle_naked_attribute, NULL },
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
\f
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND		mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS	mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE		mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION	mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS		mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST		hook_int_rtx_mode_as_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE	default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES	hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE	hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES	mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG		mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE	mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY	mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE	mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT		mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
#undef  TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p

#undef  TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef  TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return

#undef  TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok

#undef  TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p

#undef  TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef  TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Adjust the stack by SIZE bytes in the given DIRECTION (negative to
   extend the stack, positive to shrink it).  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

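/* Illustrative trace (a sketch assuming a typical setting, not taken from
   the original sources): with mcore_stack_increment == 4096, a call such
   as output_stack_adjust (-1, 10000) emits roughly

     r1 := 4096
     sp := sp - r1;  volatile store to (sp)   -- probe, size 10000 -> 5904
     sp := sp - r1;  volatile store to (sp)   -- probe, size  5904 -> 1808

   followed by one final unprobed adjustment of the 1808 residual bytes.  */
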
/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

static void
mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO (base)], INTVAL (index));
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
	  || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'N' print log2 of (the constant plus one), or 32 for -1
       (the bit count operand of a bmaski)
   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'x' print a constant in hex
   'X' print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* What does a constant cost ?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same const).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost - see and_cost().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
		 int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* FALLTHRU */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* FALLTHRU */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* FALLTHRU */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* FALLTHRU */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* FALLTHRU */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}

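/* Illustrative note (a sketch, not taken from the original sources): the
   MCore has no direct "compare equal" instruction, so e.g. (a == 7) is
   emitted here as the reversed test "cmpnei a,7" and the function returns
   true, telling the caller to use the inverted sense of the condition
   bit when branching or setting the result.  */
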
int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands[index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction ?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}

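/* Quick examples (illustrative): 100 fits the movi range (0..127),
   4096 == 2^12 is an exact power of two (bgeni), and 511 == 2^9 - 1 is a
   power of two minus one (bmaski), so all three load in one instruction;
   a value like 257 fits none of these classes and needs more work.  */
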
/* Can we load a constant inline with up to 2 instructions ?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;	/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;	/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}

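/* Worked examples (illustrative, assuming TARGET_HARDLIT):
     try_constant_tricks (257, &x, &y)  -> 3 with x = 256, y = 1
       (load 2^8 with a bgeni, then addi 1);
     try_constant_tricks (-101, &x, &y) -> 2 with x = 100
       (movi 100 then not, since ~(-101) == 100).  */
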
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
	return 0;	/* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (NONJUMP_INSN_P (insn))
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

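/* How the bit-counting trick works (a sketch): each step folds adjacent
   groups of the 32-bit mask -- bit pairs, then 2-bit sums into nibbles,
   then nibbles into bytes, then bytes and halfwords -- so, for example,
   mcore_num_ones (0xF0) yields 4 and mcore_num_zeros (0xF0) yields 28.  */
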
/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

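/* Example (illustrative): mcore_output_bseti (dst, 0x9) walks bits 0..31
   and emits "bseti dst,0" and "bseti dst,3" for the two set bits, while
   mcore_output_bclri instead emits a bclri for every zero bit position.  */
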
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

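/* Example output (illustrative): for the value 257, try_constant_tricks
   returns trick 3 with x = 256 and y = 1, so this routine emits

       bgeni   dst,8
       addi    dst,1   // 257 0x101

   rather than falling back to an lrw from the literal pool.  */
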
/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))             /* r-m */
	      {
	      case E_SImode:
		return "ldw\t%0,%1";
	      case E_HImode:
		return "ld.h\t%0,%1";
	      case E_QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";			/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    switch (GET_MODE (dst))
      {
      case E_SImode:
	return "stw\t%1,%0";
      case E_HImode:
	return "st.h\t%1,%0";
      case E_QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov	%R0,%R1\n\tmov	%0,%1";
	  else
	    return "mov	%0,%1\n\tmov	%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%R0,32";
	      else
		return "movi	%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%0,32";
	      else
		return "movi	%0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

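/* Overlap example (illustrative): copying the pair (r2,r3) into (r3,r4)
   would clobber r3 if the low word moved first, so the srcreg + 1 == dstreg
   test above moves the high word first:  mov r4,r3  then  mov r3,r2.  */
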
/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_AND (SImode, operands[0],
					       GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_IOR (SImode, operands[0],
					       GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (operands[0],
			      gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
						  GEN_INT (posn))));

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
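
/* Example (illustrative): for the general path with width == 4, posn == 8
   and a register source, the expansion ANDs the destination with the clear
   mask ~(0xF << 8), masks the source down to 4 bits, shifts it left by 8,
   and ORs it back in -- all in SImode, avoiding the subregs noted above.  */
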
\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}

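/* Trace (illustrative): for size == 8 and align == 4 the two-phase loop
   overlaps loads and stores, emitting

     load  temp0 <- src+0
     load  temp1 <- src+4,  then store dst+0 <- temp0
     store dst+4 <- temp1

   so each load is separated from the store that consumes it.  */
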
bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
	max = 4*4;
      else if (bytes & 3)
	max = 8*4;
      else
	max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
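
/* Threshold examples (illustrative): with word alignment, an odd length
   such as 15 bytes is inlined only up to 16 bytes, a length divisible by
   2 but not by 4 up to 32 bytes, and a multiple of 4 up to 64 bytes;
   anything larger returns false so the caller can fall back to a library
   block-move call instead.  */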
\f

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */

8f90be4c 1701static void
08903e08 1702layout_mcore_frame (struct mcore_frame * infp)
8f90be4c
NC
1703{
1704 int n;
1705 unsigned int i;
1706 int nbytes;
1707 int regarg;
1708 int localregarg;
8f90be4c
NC
1709 int outbounds;
1710 unsigned int growths;
1711 int step;
1712
1713 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 1714 was passed partially in registers and partially on the stack. */
38173d38 1715 nbytes = crtl->args.pretend_args_size;
8f90be4c
NC
1716
1717 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1718 if (current_function_anonymous_args)
1719 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1720
1721 infp->arg_size = nbytes;
1722
1723 /* How much space to save non-volatile registers we stomp. */
1724 infp->reg_mask = calc_live_regs (& n);
1725 infp->reg_size = n * 4;
1726
14bc6742 1727 /* And the rest of it... locals and space for overflowed outbounds. */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~(STACK_BYTES - 1);

  /* The only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that the base of the locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     Use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      gcc_assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
 finish:
  gcc_assert (infp->reg_offset >= 0);
  gcc_assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}
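
/* The word-rounding idiom used throughout layout_mcore_frame deserves a
   note.  A minimal sketch, assuming the alignment is a power of two
   (STACK_BYTES is 4 or 8 here); the helper name is illustrative only
   and is not part of the original code.  */

static int
mcore_round_up_example (int size, int align)
{
  /* Adding ALIGN-1 and masking off the low bits rounds SIZE up to the
     next multiple of ALIGN, e.g. (13 + 7) & ~7 == 16.  */
  return (size + align - 1) & ~(align - 1);
}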

/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  gcc_unreachable ();
}
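
/* A worked instance of the offsets above, under hypothetical numbers;
   the helper name and the values are illustrative only: 8 bytes of
   locals, 8 bytes of register saves, 16 bytes of outbound argument
   space, and no padding anywhere.  */

static void
mcore_elimination_offset_example (void)
{
  int local_size = 8, pad_local = 0, reg_size = 8, pad_reg = 0;
  int outbound_size = 16, pad_outbound = 0;

  int above_frame = local_size + pad_local + reg_size + pad_reg;  /* 16 */
  int below_frame = outbound_size + pad_outbound;                 /* 16 */

  /* ap->fp elimination yields 16, ap->sp yields 32, fp->sp yields 16.  */
  (void) above_frame;
  (void) below_frame;
}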

/* Keep track of some information about varargs for the prologue.  */

static void
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
                              machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);

  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (Pmode, stack_pointer_rtx,
                                                  offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (Pmode, stack_pointer_rtx,
                                                      offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}
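
/* The store-multiple fast path above only fires for a contiguous run of
   saved registers ending at r15.  A minimal sketch of the first_reg
   scan with an illustrative mask; the helper name is not part of the
   original code.  */

static int
mcore_stm_first_reg_example (unsigned int reg_mask)
{
  int first_reg = 15;

  /* For reg_mask == 0xf000 (r12-r15 saved) this walks 15, 14, 13, 12,
     stops at bit 11, and returns 12 as the base register of the stm.  */
  while (reg_mask & (1 << first_reg))
    first_reg--;

  return first_reg + 1;
}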

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: this while loop should accumulate the adjustments and do a
         single "sell" (the inverse of the prologue's "buy").  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
          growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          emit_insn (gen_movsi
                     (gen_rtx_REG (SImode, i),
                      gen_rtx_MEM (SImode,
                                   plus_constant (Pmode, stack_pointer_rtx,
                                                  offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}
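
/* The XXX notes above suggest folding the remaining growth steps into a
   single adjustment.  A minimal sketch of that idea under the same
   fi.growth[] layout; the helper name is illustrative only and this is
   not what the code currently emits.  */

static int
mcore_accumulated_shrink_example (const int *growth, int from, int downto)
{
  int total = 0;
  int g;

  /* Sum the pending growth steps from index FROM down to DOWNTO.  */
  for (g = from; g >= downto; g--)
    total += growth[g];

  /* The caller would then emit one output_stack_adjust (1, total).  */
  return total;
}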

/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register; constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw   L1,r0
   ..
   lrw   L3,r0
   bra   L4
   align
   L3:   .long value
   L4:   .long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
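
/* Restating the arithmetic above: the 1020-byte lrw reach minus the 4
   bytes reserved for a possible branch/align before the pool gives
   MAX_COUNT == 1016, and with 4-byte entries the pool holds at most
   1016/4 == 254 constants.  The compile-time check below is an
   illustrative sketch only, not part of the original code.  */

typedef char mcore_pool_size_check_example[MAX_POOL_SIZE == 254 ? 1 : -1];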

/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */

const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
        {
          pool_node * p = pool_vector + i;

          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

          output_asm_insn (".long %0", &p->value);
        }

      pool_size = 0;
    }

  return "";
}

/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
        return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
           GET_CODE (dst) != SUBREG) ||
          GET_MODE (dst) != SImode)
        return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
           (GET_CODE (src) == SUBREG &&
            GET_CODE (SUBREG_REG (src)) == REG)) &&
          GET_MODE (src) == SImode)
        return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
               INTVAL (src) == 0)
        return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode &&
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
               INTVAL (XEXP (src, 1)) == 1)
        return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == 1) ||
                (GET_CODE (src) == PLUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == -1)) &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode)
        return COND_DEC_INSN;

      /* Some insns that we don't bother with:
         (set (rx:DI) (ry:DI))
         (set (rx:DI) (const_int 0))
       */
    }
  else if (JUMP_P (insn)
           && GET_CODE (PATTERN (insn)) == SET
           && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}

/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx_insn *
emit_new_cond_insn (rtx_insn *insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (NONJUMP_INSN_P (insn))
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
        c_insn = gen_movt0 (dst, src, dst);
      else
        c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
        c_insn = gen_incscc (dst, dst);
      else
        c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
        c_insn = gen_decscc (dst, dst);
      else
        c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
        c_insn = gen_branch_true (dst);
      else
        c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
         point, but go ahead and save the notes.  This will help is_dead()
         when applying peepholes (links don't matter since they are not
         used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
         deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return as_a <rtx_insn *> (c_insn);
}

/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insn in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

   bt	L2             <-- end of block 1 (delete)
   mov	r7,r8
   addu	r7,1
   br	L3             <-- end of block 2

   L2:	...            <-- start of block 3 (NUSES==1)
   L3:	...

   to:

   movf	r7,r8
   incf	r7
   bf	L3

   L3:	...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC  */

static rtx_insn *
conditionalize_block (rtx_insn *first)
{
  rtx_insn *insn;
  rtx br_pat;
  rtx_insn *end_blk_1_br = 0;
  rtx_insn *end_blk_2_insn = 0;
  rtx_insn *start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (! JUMP_P (first)
      || GET_CODE (PATTERN (first)) != SET
      || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
         and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
        break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
         insn is not conditionalizable or makes this optimization fail,
         just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
        return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
        {
          blk_size ++;
          end_blk_2_insn = insn;
        }
    }

  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in the block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx_insn *newinsn;

      if (insn->deleted ())
        continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
        {
          if (end_blk_2_insn == insn)
            end_blk_2_insn = newinsn;

          insn = newinsn;
        }
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? We probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}

/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it might expose more optimization opportunities,
   but the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc. to make it work - and stick
   it in before cse 2).  */

static void
conditionalize_optimization (void)
{
  rtx_insn *insn;

  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
    continue;
}

/* This is to handle loads from the constant pool.  */

static void
mcore_reorg (void)
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization ();

  /* Literal pool generation is now pushed off until the assembler.  */
}


/* Return true if X is something that can be moved directly into r15.  */

bool
mcore_r15_operand_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return mcore_const_ok_for_inline (INTVAL (x));

    case REG:
    case SUBREG:
    case MEM:
      return 1;

    default:
      return 0;
    }
}

/* Implement SECONDARY_RELOAD_CLASS.  If RCLASS contains r15, and we can't
   directly move X into it, use r1-r14 as a temporary.  */

enum reg_class
mcore_secondary_reload_class (enum reg_class rclass,
                              machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
      && !mcore_r15_operand_p (x))
    return LRW_REGS;
  return NO_REGS;
}

/* Return the reg_class to use when reloading the rtx X into the class
   RCLASS.  If X is too complex to move directly into r15, prefer to
   use LRW_REGS instead.  */

enum reg_class
mcore_reload_class (rtx x, enum reg_class rclass)
{
  if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return rclass;
}

/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2).  We might think in the future
   about whether we want to be able to say something about modes.  */

int
mcore_is_same_reg (rtx x, rtx y)
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
    return 1;

  return 0;
}
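
/* A minimal usage sketch of the predicate above; the helper name is
   illustrative only.  Both rtxes name hard register 2, so the result
   is 1 even though the modes differ - modes are deliberately ignored,
   as the comment above notes.  */

static int
mcore_same_reg_example (void)
{
  rtx x = gen_rtx_REG (SImode, 2);
  rtx y = gen_rtx_REG (HImode, 2);

  return mcore_is_same_reg (x, y);
}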

static void
mcore_option_override (void)
{
  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}


/* Compute the number of word sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */

int
mcore_num_arg_regs (machine_mode mode, const_tree type)
{
  int size;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}
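
/* A worked example of the computation above; the helper name is
   illustrative only.  ROUND_ADVANCE rounds a byte count up to whole
   words, so with 4-byte words a 6-byte BLKmode structure needs
   (6 + 3) / 4 == 2 argument registers.  */

static int
mcore_num_arg_regs_example (void)
{
  return ROUND_ADVANCE (6);	/* Yields 2 when UNITS_PER_WORD is 4.  */
}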

static rtx
handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
        {
          arg_regs [nregs] =
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
                               GEN_INT (nregs * UNITS_PER_WORD));
          nregs ++;
        }

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      gcc_assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
                         arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
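
/* To make the PARALLEL case concrete: a hedged sketch of the rtx built
   for a 10-byte BLKmode structure starting in argument register 0
   (hard register 2); the helper name is illustrative only.  */

static rtx
mcore_struct_parallel_example (void)
{
  /* Three SImode pieces at byte offsets 0, 4 and 8; the last register
     carries the final two bytes packed, left-justified.  */
  rtvec vec = gen_rtvec (3,
                         gen_rtx_EXPR_LIST (SImode,
                                            gen_rtx_REG (SImode, 2),
                                            GEN_INT (0)),
                         gen_rtx_EXPR_LIST (SImode,
                                            gen_rtx_REG (SImode, 3),
                                            GEN_INT (4)),
                         gen_rtx_EXPR_LIST (SImode,
                                            gen_rtx_REG (SImode, 4),
                                            GEN_INT (8)));
  return gen_rtx_PARALLEL (BLKmode, vec);
}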

rtx
mcore_function_value (const_tree valtype, const_tree func)
{
  machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  /* Since we promote return types, we must promote the mode here too.  */
  mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

static rtx
mcore_function_arg (cumulative_args_t cum, machine_mode mode,
                    const_tree type, bool named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}

4665ac17 2823static void
ef4bddc2 2824mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
4665ac17
NF
2825 const_tree type, bool named ATTRIBUTE_UNUSED)
2826{
d5cc9181
JR
2827 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2828
4665ac17
NF
2829 *cum = (ROUND_REG (*cum, mode)
2830 + (int)named * mcore_num_arg_regs (mode, type));
2831}
2832
c2ed6cf8 2833static unsigned int
ef4bddc2 2834mcore_function_arg_boundary (machine_mode mode,
c2ed6cf8
NF
2835 const_tree type ATTRIBUTE_UNUSED)
2836{
2837 /* Doubles must be aligned to an 8 byte boundary. */
2838 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2839 ? BIGGEST_ALIGNMENT
2840 : PARM_BOUNDARY);
2841}
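
/* A quick illustration of the rule above; the helper name is
   illustrative only.  An 8-byte mode such as DFmode gets
   BIGGEST_ALIGNMENT, everything else gets PARM_BOUNDARY.  */

static unsigned int
mcore_arg_boundary_example (void)
{
  return mcore_function_arg_boundary (DFmode, NULL_TREE);
}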

/* Returns the number of bytes of argument registers required to hold *part*
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

static int
mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
                         tree type, bool named)
{
  int reg = ROUND_REG (*get_cumulative_args (cum), mode);

  if (named == 0)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg * UNITS_PER_WORD;
}
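
/* A worked instance of the computation above; the helper name and the
   numbers are illustrative only.  With NPARM_REGS == 6, an argument
   needing 3 words that starts in argument register 4 has only
   registers 4 and 5 left, so 2 words go in registers and the rest on
   the stack.  */

static int
mcore_partial_bytes_example (void)
{
  int reg = 4;
  int words_needed = 3;

  if (reg + words_needed <= NPARM_REGS)
    return 0;

  return (NPARM_REGS - reg) * UNITS_PER_WORD;	/* 8 bytes in registers.  */
}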

/* Return nonzero if SYMBOL is marked as being dllexport'd.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return nonzero if SYMBOL is marked as being dllimport'd.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}

/* Mark a DECL as being dllexport'd.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  if (mcore_dllexport_name_p (oldname))
    return; /* Already done.  */

  newname = XALLOCAVEC (char, strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
}

/* Mark a DECL as being dllimport'd.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  gcc_assert (!mcore_dllexport_name_p (oldname));
  if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("initialized variable %q+D is marked dllimport", decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = XALLOCAVEC (char, strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx_MEM (Pmode,
                        gen_rtx_SYMBOL_REF (Pmode,
                                            IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}

static int
mcore_dllexport_p (tree decl)
{
  if (TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (tree decl)
{
  if (TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
}

/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drective (PE) or .exports (ELF) sections.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
            || TREE_CODE (decl) == VAR_DECL)
           && DECL_RTL (decl) != NULL_RTX
           && GET_CODE (DECL_RTL (decl)) == MEM
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
         ??? We leave these alone for now.  */
    }
}

/* Undo the effects of the above.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  return str + (str[0] == '@' ? 3 : 0);
}

/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked     - do not create a function prologue/epilogue.  */

/* Handle a "naked" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* ??? It looks like this is PE specific?  Oh well, this is what the
   old code did as well.  */

static void
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  int len;
  const char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  name = (* targetm.strip_name_encoding) (name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place them in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (decl_readonly_section (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = XALLOCAVEC (char, len + 1);

  sprintf (string, "%s%s", prefix, name);

  set_decl_section_name (decl, string);
}

int
mcore_naked_function_p (void)
{
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}

static bool
mcore_warn_func_return (tree decl)
{
  /* Naked functions are implemented entirely in assembly, including the
     return sequence, so suppress warnings about this.  */
  return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
}

#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char *name,
                         unsigned int flags ATTRIBUTE_UNUSED,
                         tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */

/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */

static void
mcore_external_libcall (rtx fun)
{
  fprintf (asm_out_file, "\t.import\t");
  assemble_name (asm_out_file, XSTR (fun, 0));
  fprintf (asm_out_file, "\n");
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  const HOST_WIDE_INT size = int_size_in_bytes (type);
  return (size == -1 || size > 2 * UNITS_PER_WORD);
}

/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
   Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   On the MCore, the trampoline looks like:
	lrw	r1,  function
	lrw	r13, area
	jmp	r13
	or	r0, r0
    .literals  */

static void
mcore_asm_trampoline_template (FILE *f)
{
  fprintf (f, "\t.short	0x7102\n");
  fprintf (f, "\t.short	0x7d02\n");
  fprintf (f, "\t.short	0x00cd\n");
  fprintf (f, "\t.short	0x1e00\n");
  fprintf (f, "\t.long	0\n");
  fprintf (f, "\t.long	0\n");
}

/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, fnaddr);
}
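
/* A sketch of the filled-in 16-byte trampoline implied by the template
   and init code above.  The struct name and field meanings are
   illustrative only, under the assumption that the two .long slots are
   the literals picked up by the lrw instructions.  */

struct mcore_tramp_layout_example
{
  unsigned short insns[4];	/* The four fixed half-words (offsets 0-7).  */
  unsigned int chain;		/* Static chain value (offset 8).  */
  unsigned int func;		/* Target function address (offset 12).  */
};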

/* Implement TARGET_LEGITIMATE_CONSTANT_P

   On the MCore, allow anything but a double.  */

static bool
mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return GET_CODE (x) != CONST_DOUBLE;
}

/* Helper function for `mcore_legitimate_address_p'.  */

static bool
mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
{
  if (strict_p)
    return REGNO_OK_FOR_BASE_P (REGNO (reg));
  else
    return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
}

static bool
mcore_base_register_rtx_p (const_rtx x, bool strict_p)
{
  return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
}

/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
   and for DI is 0..56 because we use two SI loads, etc.  */

static bool
mcore_legitimate_index_p (machine_mode mode, const_rtx op)
{
  if (CONST_INT_P (op))
    {
      if (GET_MODE_SIZE (mode) >= 4
          && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
          &&  ((unsigned HOST_WIDE_INT) INTVAL (op))
              <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
        return true;
      if (GET_MODE_SIZE (mode) == 2
          && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
          &&  ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
        return true;
      if (GET_MODE_SIZE (mode) == 1
          && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
        return true;
    }
  return false;
}
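
/* Restating the index ranges above with concrete values; the helper
   name is illustrative only.  A word access at offset 60 is legal, at
   61 it is misaligned, and a DImode access tops out at 56 so that its
   second word still ends within the 64-byte reach.  */

static bool
mcore_index_reach_example (void)
{
  return mcore_legitimate_index_p (SImode, GEN_INT (60))	/* true  */
         && !mcore_legitimate_index_p (SImode, GEN_INT (61))	/* false */
         && mcore_legitimate_index_p (DImode, GEN_INT (56));	/* true  */
}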

/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.

   Allow  REG
	  REG + disp  */

static bool
mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
                            addr_space_t as)
{
  gcc_assert (ADDR_SPACE_GENERIC_P (as));

  if (mcore_base_register_rtx_p (x, strict_p))
    return true;
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);
      if (mcore_base_register_rtx_p (xop0, strict_p)
          && mcore_legitimate_index_p (mode, xop1))
        return true;
      if (mcore_base_register_rtx_p (xop1, strict_p)
          && mcore_legitimate_index_p (mode, xop0))
        return true;
    }

  return false;
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  We may keep double values in
   even registers.  */

static bool
mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return (regno & 1) == 0;

  return regno < 18;
}

/* Implement TARGET_MODES_TIEABLE_P.  */

static bool
mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
}