/* Output routines for Motorola MCore processor
   Copyright (C) 1993-2021 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "mcore.h"
#include "output.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "builtins.h"
#include "regs.h"

/* This file should be included last.  */
#include "target-def.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,    LRW_REGS, GENERAL_REGS,
  GENERAL_REGS, C_REGS,      NO_REGS,  NO_REGS,
};

struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        try_constant_tricks          (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *output_inline_const         (machine_mode, rtx *);
static void       layout_mcore_frame           (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (cumulative_args_t,
						const function_arg_info &,
						int *, int);
static cond_type  is_cond_candidate            (rtx);
static rtx_insn  *emit_new_cond_insn           (rtx_insn *, int);
static rtx_insn  *conditionalize_block         (rtx_insn *);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (machine_mode, const_tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *,
						unsigned int, tree);
#endif
static void       mcore_print_operand          (FILE *, rtx, int);
static void       mcore_print_operand_address  (FILE *, machine_mode, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char *mcore_strip_name_encoding   (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, machine_mode, int, int,
						int *, bool);
static void       mcore_external_libcall       (rtx);
static bool       mcore_return_in_memory       (const_tree, const_tree);
static int        mcore_arg_partial_bytes      (cumulative_args_t,
						const function_arg_info &);
static rtx        mcore_function_arg           (cumulative_args_t,
						const function_arg_info &);
static void       mcore_function_arg_advance   (cumulative_args_t,
						const function_arg_info &);
static unsigned int mcore_function_arg_boundary (machine_mode,
						 const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init        (rtx, tree, rtx);
static bool       mcore_warn_func_return       (tree);
static void       mcore_option_override        (void);
static bool       mcore_legitimate_constant_p  (machine_mode, rtx);
static bool       mcore_legitimate_address_p   (machine_mode, rtx, bool,
						addr_space_t);
static bool       mcore_hard_regno_mode_ok     (unsigned int, machine_mode);
static bool       mcore_modes_tieable_p        (machine_mode, machine_mode);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "dllexport", 0, 0, true,  false, false, false, NULL, NULL },
  { "dllimport", 0, 0, true,  false, false, false, NULL, NULL },
  { "naked",     0, 0, true,  false, false, false,
    mcore_handle_naked_attribute, NULL },
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
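
/* Usage sketch (illustrative, not part of the GCC sources): code
   compiled for this target can tag declarations like

     void isr_stub (void) __attribute__ ((naked));
     void helper (void) __attribute__ ((dllimport));

   All three attributes take no arguments (min_len == max_len == 0)
   and attach to declarations rather than types (decl_req is true).  */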

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND		mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS	mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE		mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION	mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS		mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST		hook_int_rtx_mode_as_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE	default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES	hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE	hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES	mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG		mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE	mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY	mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE	mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT		mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
#undef  TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p

#undef  TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef  TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return

#undef  TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok

#undef  TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p

#undef  TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef  TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by DIRECTION * SIZE bytes, growing it incrementally
   (with a probing store) when a large extension is requested.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
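
/* Illustrative expansion (a sketch; the exact split depends on the
   value of mcore_stack_increment, a target-configurable limit): a
   downward adjustment of 100 bytes with an increment of 32 emits three
   probed 32-byte extensions, each a subtraction from sp followed by a
   volatile store to the newly exposed word, and then a plain 4-byte
   adjustment for the residue, which needs no probe.  */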

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_or_fixed_reg_p (reg))
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}

/* Print the operand address in X to the stream.  */

static void
mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO(base)], INTVAL (index));
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
	  || code == '!');
}

/* Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction
   'N'  print log2 of one plus a constant (32 for -1), as used by bmaski
   'x'  print a constant in hexadecimal.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
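
/* Examples of the modifiers above (a sketch, assuming the default
   register names): '%P' on (const_int 8) prints "3" (its log2), '%N'
   on (const_int 7) prints "3" (log2 of 7 + 1), and '%U' on register
   r2 prints "r2-r5", the register range used by ldm/stm.  */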

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost?  We do this because immediates may
   have been relaxed.  We want to ensure that CSE will eliminate relaxed
   immediates; otherwise we'll get bad code (multiple reloads of the same
   constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an OR cost - see mcore_and_cost ().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
		 int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* FALLTHRU */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* FALLTHRU */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* FALLTHRU */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
	  /* Covered by btsti x,31.  */
	  && INTVAL (op1) != 0
	  && ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* FALLTHRU */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* FALLTHRU */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
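
/* Worked example (a sketch): for (eq r2, r3) the switch above rewrites
   the comparison to NE and returns invert == true, so a single
   "cmpne r2,r3" is emitted and the caller branches on the inverted
   condition bit.  Likewise (le r2, 10) becomes (lt r2, 11) via the
   J-immediate check, which maps directly onto "cmplti r2,11".  */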

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
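
/* For instance (values checked against the predicates above): 100 is
   loadable as a movi immediate (0-127), 0x1000 as an exact power of
   two (bgeni), and 0x0fff as a power of two minus one (bmaski); 200
   satisfies none of the three tests and needs a two-instruction trick
   or a literal-pool lrw.  */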

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
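
/* Worked example (a sketch): -4096 (0xfffff000) is not loadable
   directly, but its complement 4095 (0xfff) is a power of two minus
   one, so the function returns trick 2 with *X == 0xfff, and
   output_inline_const () emits "bmaski rd,12" followed by "not rd".
   Similarly 130 returns trick 3 with *X == 128, *Y == 2, giving
   "bgeni rd,7" followed by "addi rd,2".  */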

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;		/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}

/* Check whether REG is dead at insn FIRST.  This is done by searching
   ahead for either the next use (i.e., REG is live), a death note, or a
   set of REG.  Don't just use dead_or_set_p () since reload does not
   always mark deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not
   defined).  We can ignore subregs by extracting the actual register.
   BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
	return 0;	/* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (NONJUMP_INSN_P (insn))
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
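
/* Tracing the trick for MASK == 0xff: the first line sums adjacent bit
   pairs (0xff -> 0xaa, each 2-bit field holding 2), the second sums
   pairs of pairs (0xaa -> 0x44, each nibble holding 4), the third
   collapses nibbles into bytes (0x44 -> 0x08), and the final two adds
   fold the four byte counts together, yielding 8.  */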

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
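
/* For example, MASK == 0x00000009 (bits 0 and 3 set) makes the loop
   above emit "bseti rd,0" and "bseti rd,3"; mcore_output_bclri ()
   below does the complementary walk over the clear bits.  */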

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Output the peephole for moving a constant that gets NOT'ed followed
   by an AND (i.e. combine the NOT and the AND into an andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks ().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)		/* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";		/* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";		/* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case E_SImode:
		return "ldw\t%0,%1";
	      case E_HImode:
		return "ld.h\t%0,%1";
	      case E_QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
	    return output_inline_const (SImode, operands);	/* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";			/* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)		/* m-r */
    switch (GET_MODE (dst))
      {
      case E_SImode:
	return "stw\t%1,%0";
      case E_HImode:
	return "st.h\t%1,%0";
      case E_QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source register is not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov %R0,%R1\n\tmov %0,%1";
	  else
	    return "mov %0,%1\n\tmov %R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski %R0,32";
	      else
		return "movi %R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski %0,32";
	      else
		return "movi %0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field () (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_AND (SImode, operands[0],
					       GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_IOR (SImode, operands[0],
					       GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (operands[0],
			      gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (i.e., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
						  GEN_INT (posn))));

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
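
/* Example of the width-1 fast path (a sketch): inserting the constant 1
   at bit position 3 emits a single IOR of the destination with 0x8,
   which the md patterns can match to "bseti rd,3"; inserting 0 at the
   same position emits an AND with ~0x8, matchable to "bclri rd,3".  */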

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN
   are known constants.  DST_MEM and SRC_MEM are memory references whose
   addresses are forced into registers before use.  */

static const machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
	max = 4*4;
      else if (bytes & 3)
	max = 8*4;
      else
	max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
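
/* Worked example (a sketch): a 15-byte copy with 4-byte alignment has
   (bytes & 1) set, so the inline limit is 16 bytes and the move is
   expanded as interleaved loads and stores of 4, 4, 4, 2 and 1 bytes;
   a 15-byte copy with alignment 1 exceeds the 4-byte limit, the
   expander returns false, and the caller falls back to a library
   call.  */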
1687\f
1688
1689/* Code to generate prologue and epilogue sequences. */
1690static int number_of_regs_before_varargs;
4816b8e4 1691
bd5bd7ac 1692/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
8f90be4c
NC
1693 for a varargs function. */
1694static int current_function_anonymous_args;
1695
8f90be4c
NC
1696#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1697#define STORE_REACH (64) /* Maximum displace of word store + 4. */
4816b8e4 1698#define ADDI_REACH (32) /* Maximum addi operand. */
8f90be4c 1699
8f90be4c 1700static void
08903e08 1701layout_mcore_frame (struct mcore_frame * infp)
8f90be4c
NC
1702{
1703 int n;
1704 unsigned int i;
1705 int nbytes;
1706 int regarg;
1707 int localregarg;
8f90be4c
NC
1708 int outbounds;
1709 unsigned int growths;
1710 int step;
1711
1712 /* Might have to spill bytes to re-assemble a big argument that
4816b8e4 1713 was passed partially in registers and partially on the stack. */
38173d38 1714 nbytes = crtl->args.pretend_args_size;
8f90be4c
NC
1715
1716 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1717 if (current_function_anonymous_args)
1718 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1719
1720 infp->arg_size = nbytes;
1721
1722 /* How much space to save non-volatile registers we stomp. */
1723 infp->reg_mask = calc_live_regs (& n);
1724 infp->reg_size = n * 4;
1725
14bc6742 1726 /* And the rest of it... locals and space for overflowed outbounds. */
8f90be4c 1727 infp->local_size = get_frame_size ();
38173d38 1728 infp->outbound_size = crtl->outgoing_args_size;
8f90be4c
NC
1729
1730 /* Make sure we have a whole number of words for the locals. */
1731 if (infp->local_size % STACK_BYTES)
1732 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1733
1734 /* Only thing we know we have to pad is the outbound space, since
1735 we've aligned our locals assuming that base of locals is aligned. */
1736 infp->pad_local = 0;
1737 infp->pad_reg = 0;
1738 infp->pad_outbound = 0;
1739 if (infp->outbound_size % STACK_BYTES)
1740 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1741
1742 /* Now we see how we want to stage the prologue so that it does
1743 the most appropriate stack growth and register saves to either:
1744 (1) run fast,
1745 (2) reduce instruction space, or
1746 (3) reduce stack space. */
b6a1cbae 1747 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
8f90be4c
NC
1748 infp->growth[i] = 0;
1749
1750 regarg = infp->reg_size + infp->arg_size;
1751 localregarg = infp->local_size + regarg;
8f90be4c
NC
1752 outbounds = infp->outbound_size + infp->pad_outbound;
1753 growths = 0;
1754
1755 /* XXX: Consider one where we consider localregarg + outbound too! */
1756
1757 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
1758 use stw's with offsets and buy the frame in one shot. */
1759 if (localregarg <= ADDI_REACH
1760 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1761 {
1762 /* Make sure we'll be aligned. */
1763 if (localregarg % STACK_BYTES)
1764 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1765
1766 step = localregarg + infp->pad_reg;
1767 infp->reg_offset = infp->local_size;
1768
1769 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1770 {
1771 step += outbounds;
1772 infp->reg_offset += outbounds;
1773 outbounds = 0;
1774 }
1775
1776 infp->arg_offset = step - 4;
1777 infp->growth[growths++] = step;
1778 infp->reg_growth = growths;
1779 infp->local_growth = growths;
1780
4816b8e4 1781 /* If we haven't already folded it in. */
8f90be4c
NC
1782 if (outbounds)
1783 infp->growth[growths++] = outbounds;
1784
1785 goto finish;
1786 }
1787
1788 /* Frame can't be done with a single subi, but can be done with 2
1789 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1790 shift some of the stack purchase into the first subi, so both are
1791 single instructions. */
1792 if (localregarg <= STORE_REACH
1793 && (infp->local_size > ADDI_REACH)
1794 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1795 {
1796 int all;
1797
1798 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1799 if (localregarg % STACK_BYTES)
1800 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1801
1802 all = localregarg + infp->pad_reg + infp->pad_local;
1803 step = ADDI_REACH; /* As much up front as we can. */
1804 if (step > all)
1805 step = all;
1806
1807 /* XXX: Consider whether step will still be aligned; we believe so. */
1808 infp->arg_offset = step - 4;
1809 infp->growth[growths++] = step;
1810 infp->reg_growth = growths;
1811 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1812 all -= step;
1813
4816b8e4 1814 /* Can we fold in any space required for outbounds? */
8f90be4c
NC
1815 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1816 {
1817 all += outbounds;
1818 outbounds = 0;
1819 }
1820
4816b8e4 1821 /* Get the rest of the locals in place. */
8f90be4c
NC
1822 step = all;
1823 infp->growth[growths++] = step;
1824 infp->local_growth = growths;
1825 all -= step;
1826
819bfe0e 1827 gcc_assert (all == 0);
8f90be4c 1828
4816b8e4 1829 /* Finish off if we need to do so. */
1830 if (outbounds)
1831 infp->growth[growths++] = outbounds;
1832
1833 goto finish;
1834 }
1835
1836 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1837 Then we buy the rest of the frame in 1 or 2 steps depending on
1838 whether we need a frame pointer. */
1839 if ((regarg % STACK_BYTES) == 0)
1840 {
1841 infp->growth[growths++] = regarg;
1842 infp->reg_growth = growths;
1843 infp->arg_offset = regarg - 4;
1844 infp->reg_offset = 0;
1845
1846 if (infp->local_size % STACK_BYTES)
1847 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1848
1849 step = infp->local_size + infp->pad_local;
1850
1851 if (!frame_pointer_needed)
1852 {
1853 step += outbounds;
1854 outbounds = 0;
1855 }
1856
1857 infp->growth[growths++] = step;
1858 infp->local_growth = growths;
1859
4816b8e4 1860 /* If there's any left to be done. */
8f90be4c
NC
1861 if (outbounds)
1862 infp->growth[growths++] = outbounds;
1863
1864 goto finish;
1865 }
1866
1867 /* XXX: optimizations that we'll want to play with....
1868 -- regarg is not aligned, but it's a small number of registers;
1869 use some of localsize so that regarg is aligned and then
1870 save the registers. */
1871
1872 /* Simple encoding; plods down the stack buying the pieces as it goes.
1873 -- does not optimize space consumption.
1874 -- does not attempt to optimize instruction counts.
1875 -- but it is safe for all alignments. */
1876 if (regarg % STACK_BYTES != 0)
1877 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1878
1879 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1880 infp->reg_growth = growths;
1881 infp->arg_offset = infp->growth[0] - 4;
1882 infp->reg_offset = 0;
1883
1884 if (frame_pointer_needed)
1885 {
1886 if (infp->local_size % STACK_BYTES != 0)
1887 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1888
1889 infp->growth[growths++] = infp->local_size + infp->pad_local;
1890 infp->local_growth = growths;
1891
1892 infp->growth[growths++] = outbounds;
1893 }
1894 else
1895 {
1896 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1897 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1898
1899 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1900 infp->local_growth = growths;
1901 }
1902
f27cd94d 1903 /* Anything else that we've forgotten?, plus a few consistency checks. */
8f90be4c 1904 finish:
1905 gcc_assert (infp->reg_offset >= 0);
1906 gcc_assert (growths <= MAX_STACK_GROWS);
1907
1908 for (i = 0; i < growths; i++)
6e1f65b5 1909 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1910}
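/* For illustration only, a minimal sketch (not part of this port; the
   helper name is hypothetical) of the word-rounding idiom used
   throughout layout_mcore_frame above.  It assumes STACK_BYTES is a
   power of two.  */
#if 0
static int
round_up_to_stack_bytes (int size)
{
  /* With STACK_BYTES == 8: 13 -> 16, 16 -> 16, 17 -> 24.  */
  return (size + STACK_BYTES - 1) & ~(STACK_BYTES - 1);
}
#endif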
1911
1912/* Define the offset between two registers, one to be eliminated, and
1913 the other its replacement, at the start of a routine. */
4816b8e4 1914
8f90be4c 1915int
08903e08 1916mcore_initial_elimination_offset (int from, int to)
1917{
1918 int above_frame;
1919 int below_frame;
1920 struct mcore_frame fi;
1921
1922 layout_mcore_frame (& fi);
1923
1924 /* fp to ap */
1925 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1926 /* sp to fp */
1927 below_frame = fi.outbound_size + fi.pad_outbound;
1928
1929 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1930 return above_frame;
1931
1932 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1933 return above_frame + below_frame;
1934
1935 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1936 return below_frame;
1937
6e1f65b5 1938 gcc_unreachable ();
1939}
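/* A worked example of the eliminations above (frame values assumed for
   illustration): with local_size == 8, pad_local == 0, reg_size == 12,
   pad_reg == 4, outbound_size == 8 and pad_outbound == 0, ap-to-fp is
   8 + 0 + 12 + 4 == 24, fp-to-sp is 8 + 0 == 8, and ap-to-sp is 32.  */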
1940
1941/* Keep track of some information about varargs for the prolog. */
1942
09a2b93a 1943static void
d5cc9181 1944mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
e7056ca4 1945 const function_arg_info &arg,
1946 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1947 int second_time ATTRIBUTE_UNUSED)
8f90be4c 1948{
1949 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1950
1951 current_function_anonymous_args = 1;
1952
1953 /* We need to know how many argument registers are used before
1954 the varargs start, so that we can push the remaining argument
1955 registers during the prologue. */
1956 number_of_regs_before_varargs
1957 = *args_so_far + mcore_num_arg_regs (arg.mode, arg.type);
8f90be4c 1958
dab66575 1959 /* There is a bug somewhere in the arg handling code.
1960 Until I can find it, this workaround always pushes the
1961 last named argument onto the stack. */
09a2b93a 1962 number_of_regs_before_varargs = *args_so_far;
1963
1964 /* The last named argument may be split between argument registers
1965 and the stack. Allow for this here. */
1966 if (number_of_regs_before_varargs > NPARM_REGS)
1967 number_of_regs_before_varargs = NPARM_REGS;
1968}
1969
1970void
08903e08 1971mcore_expand_prolog (void)
1972{
1973 struct mcore_frame fi;
1974 int space_allocated = 0;
1975 int growth = 0;
1976
1977 /* Find out what we're doing. */
1978 layout_mcore_frame (&fi);
1979
1980 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1981 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1982
1983 if (TARGET_CG_DATA)
1984 {
1985 /* Emit a symbol for this routine's frame size. */
1986 rtx x;
1987
1988 x = DECL_RTL (current_function_decl);
1989
6e1f65b5 1990 gcc_assert (GET_CODE (x) == MEM);
1991
1992 x = XEXP (x, 0);
1993
6e1f65b5 1994 gcc_assert (GET_CODE (x) == SYMBOL_REF);
8f90be4c 1995
04695783 1996 free (mcore_current_function_name);
8f90be4c 1997
1dcd444b 1998 mcore_current_function_name = xstrdup (XSTR (x, 0));
1999
2000 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2001
e3b5732b 2002 if (cfun->calls_alloca)
2003 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2004
2005 /* 970425: RBE:
2006 We're looking at how the 8byte alignment affects stack layout
2007 and where we had to pad things. This emits information we can
2008 extract which tells us about frame sizes and the like. */
2009 fprintf (asm_out_file,
2010 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2011 mcore_current_function_name,
2012 fi.arg_size, fi.reg_size, fi.reg_mask,
2013 fi.local_size, fi.outbound_size,
2014 frame_pointer_needed);
2015 }
2016
2017 if (mcore_naked_function_p ())
2018 return;
2019
2020 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
08903e08 2021 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2022
2023 /* If we have a parameter passed partially in regs and partially in memory,
2024 the registers will have been stored to memory already in function.c. So
2025 we only need to do something here for varargs functions. */
38173d38 2026 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2027 {
2028 int offset;
2029 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2030 int remaining = fi.arg_size;
2031
2032 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2033 {
2034 emit_insn (gen_movsi
f1c25d3b 2035 (gen_rtx_MEM (SImode,
2036 plus_constant (Pmode, stack_pointer_rtx,
2037 offset)),
f1c25d3b 2038 gen_rtx_REG (SImode, rn)));
2039 }
2040 }
2041
4816b8e4 2042 /* Do we need another stack adjustment before we do the register saves? */
8f90be4c 2043 if (growth < fi.reg_growth)
08903e08 2044 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2045
2046 if (fi.reg_size != 0)
2047 {
2048 int i;
2049 int offs = fi.reg_offset;
2050
2051 for (i = 15; i >= 0; i--)
2052 {
2053 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2054 {
2055 int first_reg = 15;
2056
2057 while (fi.reg_mask & (1 << first_reg))
2058 first_reg--;
2059 first_reg++;
2060
2061 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2062 gen_rtx_REG (SImode, first_reg),
2063 GEN_INT (16 - first_reg)));
2064
2065 i -= (15 - first_reg);
2066 offs += (16 - first_reg) * 4;
2067 }
2068 else if (fi.reg_mask & (1 << i))
2069 {
2070 emit_insn (gen_movsi
f1c25d3b 2071 (gen_rtx_MEM (SImode,
2072 plus_constant (Pmode, stack_pointer_rtx,
2073 offs)),
f1c25d3b 2074 gen_rtx_REG (SImode, i)));
2075 offs += 4;
2076 }
2077 }
2078 }
2079
2080 /* Figure the locals + outbounds. */
2081 if (frame_pointer_needed)
2082 {
2083 /* If we haven't already purchased to 'fp'. */
2084 if (growth < fi.local_growth)
08903e08 2085 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2086
2087 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2088
4816b8e4 2089 /* ... and then go any remaining distance for outbounds, etc. */
2090 if (fi.growth[growth])
2091 output_stack_adjust (-1, fi.growth[growth++]);
2092 }
2093 else
2094 {
2095 if (growth < fi.local_growth)
08903e08 2096 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2097 if (fi.growth[growth])
2098 output_stack_adjust (-1, fi.growth[growth++]);
2099 }
2100}
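/* A worked example of the store-multiple path above (mask assumed for
   illustration): with fi.reg_mask == 0xf000 (r12-r15 saved) and
   fi.reg_offset == 0, the scan from r15 stops at first_reg == 12, a
   single stm stores r12..r15 at (sp), and the loop resumes below r12
   with offs == 16; no further mask bits are set, so nothing else is
   stored.  */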
2101
2102void
08903e08 2103mcore_expand_epilog (void)
2104{
2105 struct mcore_frame fi;
2106 int i;
2107 int offs;
2108 int growth = MAX_STACK_GROWS - 1;
2109
f27cd94d 2110
2111 /* Find out what we're doing. */
2112 layout_mcore_frame (&fi);
2113
2114 if (mcore_naked_function_p ())
2115 return;
f27cd94d 2116
2117 /* If we had a frame pointer, restore the sp from that. */
2118 if (frame_pointer_needed)
2119 {
2120 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2121 growth = fi.local_growth - 1;
2122 }
2123 else
2124 {
2125 /* XXX: This while loop should accumulate the total and give it back in a single adjustment. */
2126 while (growth >= fi.local_growth)
2127 {
2128 if (fi.growth[growth] != 0)
2129 output_stack_adjust (1, fi.growth[growth]);
2130 growth--;
2131 }
2132 }
2133
2134 /* Make sure we've shrunk stack back to the point where the registers
2135 were laid down. This is typically 0/1 iterations. Then pull the
4816b8e4 2136 register save information back off the stack. */
2137 while (growth >= fi.reg_growth)
2138 output_stack_adjust ( 1, fi.growth[growth--]);
2139
2140 offs = fi.reg_offset;
2141
2142 for (i = 15; i >= 0; i--)
2143 {
2144 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2145 {
2146 int first_reg;
2147
2148 /* Find the starting register. */
2149 first_reg = 15;
2150
2151 while (fi.reg_mask & (1 << first_reg))
2152 first_reg--;
2153
2154 first_reg++;
2155
2156 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2157 gen_rtx_MEM (SImode, stack_pointer_rtx),
2158 GEN_INT (16 - first_reg)));
2159
2160 i -= (15 - first_reg);
2161 offs += (16 - first_reg) * 4;
2162 }
2163 else if (fi.reg_mask & (1 << i))
2164 {
2165 emit_insn (gen_movsi
2166 (gen_rtx_REG (SImode, i),
2167 gen_rtx_MEM (SImode,
2168 plus_constant (Pmode, stack_pointer_rtx,
2169 offs))));
2170 offs += 4;
2171 }
2172 }
2173
2174 /* Give back anything else. */
dab66575 2175 /* XXX: Should accumulate total and then give it back. */
2176 while (growth >= 0)
2177 output_stack_adjust ( 1, fi.growth[growth--]);
2178}
2179\f
2180/* This code is borrowed from the SH port. */
2181
2182 /* The MCORE cannot load a large constant into a register; constants have to
2183 come from a pc relative load. The reference of a pc relative load
0fa2e4df 2184 instruction must be less than 1k in front of the instruction. This
2185 means that we often have to dump a constant inside a function, and
2186 generate code to branch around it.
2187
2188 It is important to minimize this, since the branches will slow things
2189 down and make things bigger.
2190
2191 Worst case code looks like:
2192
2193 lrw L1,r0
2194 br L2
2195 align
2196 L1: .long value
2197 L2:
2198 ..
2199
2200 lrw L3,r0
2201 br L4
2202 align
2203 L3: .long value
2204 L4:
2205 ..
2206
2207 We fix this by performing a scan before scheduling, which notices which
2208 instructions need to have their operands fetched from the constant table
2209 and builds the table.
2210
2211 The algorithm is:
2212
2213 scan, find an instruction which needs a pcrel move. Look forward, find the
2214 last barrier which is within MAX_COUNT bytes of the requirement.
2215 If there isn't one, make one. Process all the instructions between
2216 the find and the barrier.
2217
2218 In the above example, we can tell that L3 is within 1k of L1, so
2219 the first move can be shrunk from the 2 insn+constant sequence into
2220 just 1 insn, and the constant moved to L3 to make:
2221
2222 lrw L1,r0
2223 ..
2224 lrw L3,r0
2225 bra L4
2226 align
2227 L3:.long value
2228 L4:.long value
2229
2230 Then the second move becomes the target for the shortening process. */
2231
2232typedef struct
2233{
2234 rtx value; /* Value in table. */
2235 rtx label; /* Label of value. */
2236} pool_node;
2237
2238/* The maximum number of constants that can fit into one pool, since
2239 the pc relative range is 0...1020 bytes and constants are at least 4
2a43945f 2240 bytes long. We subtract 4 from the range to allow for the case where
2241 we need to add a branch/align before the constant pool. */
2242
2243#define MAX_COUNT 1016
2244#define MAX_POOL_SIZE (MAX_COUNT/4)
2245static pool_node pool_vector[MAX_POOL_SIZE];
2246static int pool_size;
2247
2248/* Dump out any constants accumulated in the final pass. These
2249 will only be labels. */
4816b8e4 2250
f27cd94d 2251const char *
08903e08 2252mcore_output_jump_label_table (void)
2253{
2254 int i;
2255
2256 if (pool_size)
2257 {
2258 fprintf (asm_out_file, "\t.align 2\n");
2259
2260 for (i = 0; i < pool_size; i++)
2261 {
2262 pool_node * p = pool_vector + i;
2263
4977bab6 2264 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2265
2266 output_asm_insn (".long %0", &p->value);
2267 }
2268
2269 pool_size = 0;
2270 }
2271
2272 return "";
2273}
2274
8f90be4c 2275/* Check whether insn is a candidate for a conditional. */
4816b8e4 2276
8f90be4c 2277static cond_type
08903e08 2278is_cond_candidate (rtx insn)
2279{
2280 /* The only things we conditionalize are those that can be directly
2281 changed into a conditional. Only bother with SImode items. If
2282 we wanted to be a little more aggressive, we could also do other
4816b8e4 2283 modes such as DImode with reg-reg move or load 0. */
b64925dc 2284 if (NONJUMP_INSN_P (insn))
2285 {
2286 rtx pat = PATTERN (insn);
2287 rtx src, dst;
2288
2289 if (GET_CODE (pat) != SET)
2290 return COND_NO;
2291
2292 dst = XEXP (pat, 0);
2293
2294 if ((GET_CODE (dst) != REG &&
2295 GET_CODE (dst) != SUBREG) ||
2296 GET_MODE (dst) != SImode)
2297 return COND_NO;
2298
2299 src = XEXP (pat, 1);
2300
2301 if ((GET_CODE (src) == REG ||
2302 (GET_CODE (src) == SUBREG &&
2303 GET_CODE (SUBREG_REG (src)) == REG)) &&
2304 GET_MODE (src) == SImode)
2305 return COND_MOV_INSN;
2306 else if (GET_CODE (src) == CONST_INT &&
2307 INTVAL (src) == 0)
2308 return COND_CLR_INSN;
2309 else if (GET_CODE (src) == PLUS &&
2310 (GET_CODE (XEXP (src, 0)) == REG ||
2311 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2312 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2313 GET_MODE (XEXP (src, 0)) == SImode &&
2314 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2315 INTVAL (XEXP (src, 1)) == 1)
2316 return COND_INC_INSN;
2317 else if (((GET_CODE (src) == MINUS &&
2318 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2319 INTVAL( XEXP (src, 1)) == 1) ||
2320 (GET_CODE (src) == PLUS &&
2321 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2322 INTVAL (XEXP (src, 1)) == -1)) &&
2323 (GET_CODE (XEXP (src, 0)) == REG ||
2324 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2325 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2326 GET_MODE (XEXP (src, 0)) == SImode)
2327 return COND_DEC_INSN;
2328
14bc6742 2329 /* Some insns that we don't bother with:
2330 (set (rx:DI) (ry:DI))
2331 (set (rx:DI) (const_int 0))
2332 */
2333
2334 }
2335 else if (JUMP_P (insn)
2336 && GET_CODE (PATTERN (insn)) == SET
2337 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2338 return COND_BRANCH_INSN;
2339
2340 return COND_NO;
2341}
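/* For illustration, the SImode patterns accepted above look like the
   following (register numbers are arbitrary):

     (set (reg:SI 2) (reg:SI 3))                            COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                         COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))    COND_INC_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))   COND_DEC_INSN  */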
2342
2343/* Emit a conditional version of insn and replace the old insn with the
2344 new one. Return the new insn if emitted. */
4816b8e4 2345
b32d5189 2346static rtx_insn *
d8485bdb 2347emit_new_cond_insn (rtx_insn *insn, int cond)
2348{
2349 rtx c_insn = 0;
2350 rtx pat, dst, src;
2351 cond_type num;
2352
2353 if ((num = is_cond_candidate (insn)) == COND_NO)
2354 return NULL;
2355
2356 pat = PATTERN (insn);
2357
b64925dc 2358 if (NONJUMP_INSN_P (insn))
2359 {
2360 dst = SET_DEST (pat);
2361 src = SET_SRC (pat);
2362 }
2363 else
2364 {
2365 dst = JUMP_LABEL (insn);
2366 src = NULL_RTX;
2367 }
2368
2369 switch (num)
2370 {
2371 case COND_MOV_INSN:
2372 case COND_CLR_INSN:
2373 if (cond)
2374 c_insn = gen_movt0 (dst, src, dst);
2375 else
2376 c_insn = gen_movt0 (dst, dst, src);
2377 break;
2378
2379 case COND_INC_INSN:
2380 if (cond)
2381 c_insn = gen_incscc (dst, dst);
2382 else
2383 c_insn = gen_incscc_false (dst, dst);
2384 break;
2385
2386 case COND_DEC_INSN:
2387 if (cond)
2388 c_insn = gen_decscc (dst, dst);
2389 else
2390 c_insn = gen_decscc_false (dst, dst);
2391 break;
2392
2393 case COND_BRANCH_INSN:
2394 if (cond)
2395 c_insn = gen_branch_true (dst);
2396 else
2397 c_insn = gen_branch_false (dst);
2398 break;
2399
2400 default:
2401 return NULL;
2402 }
2403
2404 /* Only copy the notes if they exist. */
2405 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2406 {
2407 /* We really don't need to bother with the notes and links at this
2408 point, but go ahead and save the notes. This will help is_dead()
2409 when applying peepholes (links don't matter since they are not
2410 used any more beyond this point for the mcore). */
2411 REG_NOTES (c_insn) = REG_NOTES (insn);
2412 }
2413
2414 if (num == COND_BRANCH_INSN)
2415 {
2416 /* For jumps, we need to be a little bit careful and emit the new jump
2417 before the old one and to update the use count for the target label.
2418 This way, the barrier following the old (uncond) jump will get
2419 deleted, but the label won't. */
2420 c_insn = emit_jump_insn_before (c_insn, insn);
2421
2422 ++ LABEL_NUSES (dst);
2423
2424 JUMP_LABEL (c_insn) = dst;
2425 }
2426 else
2427 c_insn = emit_insn_after (c_insn, insn);
2428
2429 delete_insn (insn);
2430
b32d5189 2431 return as_a <rtx_insn *> (c_insn);
2432}
2433
2434/* Attempt to change a basic block into a series of conditional insns. This
2435 works by taking the branch at the end of the 1st block and scanning for the
2436 end of the 2nd block. If all instructions in the 2nd block have cond.
2437 versions and the label at the start of block 3 is the same as the target
2438 from the branch at block 1, then conditionalize all insns in block 2 using
2439 the inverse condition of the branch at block 1. (Note I'm bending the
2440 definition of basic block here.)
2441
2442 e.g., change:
2443
2444 bt L2 <-- end of block 1 (delete)
2445 mov r7,r8
2446 addu r7,1
2447 br L3 <-- end of block 2
2448
2449 L2: ... <-- start of block 3 (NUSES==1)
2450 L3: ...
2451
2452 to:
2453
2454 movf r7,r8
2455 incf r7
2456 bf L3
2457
2458 L3: ...
2459
2460 we can delete the L2 label if NUSES==1 and re-apply the optimization
2461 starting at the last instruction of block 2. This may allow an entire
4816b8e4 2462 if-then-else statement to be conditionalized. BRC */
2463static rtx_insn *
2464conditionalize_block (rtx_insn *first)
8f90be4c 2465{
b32d5189 2466 rtx_insn *insn;
8f90be4c 2467 rtx br_pat;
2468 rtx_insn *end_blk_1_br = 0;
2469 rtx_insn *end_blk_2_insn = 0;
2470 rtx_insn *start_blk_3_lab = 0;
2471 int cond;
2472 int br_lab_num;
2473 int blk_size = 0;
2474
2475
2476 /* Check that the first insn is a candidate conditional jump. This is
2477 the one that we'll eliminate. If not, advance to the next insn to
2478 try. */
2479 if (! JUMP_P (first)
2480 || GET_CODE (PATTERN (first)) != SET
2481 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2482 return NEXT_INSN (first);
2483
2484 /* Extract some information we need. */
2485 end_blk_1_br = first;
2486 br_pat = PATTERN (end_blk_1_br);
2487
2488 /* Complement the condition since we use the reverse cond. for the insns. */
2489 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2490
2491 /* Determine what kind of branch we have. */
2492 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2493 {
2494 /* A normal branch, so extract label out of first arm. */
2495 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2496 }
2497 else
2498 {
2499 /* An inverse branch, so extract the label out of the 2nd arm
2500 and complement the condition. */
2501 cond = (cond == 0);
2502 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2503 }
2504
2505 /* Scan forward for the start of block 2: it must start with a
2506 label and that label must be the same as the branch target
2507 label from block 1. We don't care about whether block 2 actually
2508 ends with a branch or a label (an uncond. branch is
2509 conditionalizable). */
2510 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2511 {
2512 enum rtx_code code;
2513
2514 code = GET_CODE (insn);
2515
14bc6742 2516 /* Look for the label at the start of block 3. */
2517 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2518 break;
2519
2520 /* Skip barriers, notes, and conditionalizable insns. If the
2521 insn is not conditionalizable or makes this optimization fail,
2522 just return the next insn so we can start over from that point. */
2523 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2524 return NEXT_INSN (insn);
2525
112cdef5 2526 /* Remember the last real insn before the label (i.e. end of block 2). */
2527 if (code == JUMP_INSN || code == INSN)
2528 {
2529 blk_size ++;
2530 end_blk_2_insn = insn;
2531 }
2532 }
2533
2534 if (!insn)
2535 return insn;
2536
2537 /* It is possible for this optimization to slow performance if the blocks
2538 are long. This really depends upon whether the branch is likely taken
2539 or not. If the branch is taken, we slow performance in many cases. But,
2540 if the branch is not taken, we always help performance (for a single
2541 block, but for a double block (i.e. when the optimization is re-applied)
2542 this is not true since the 'right thing' depends on the overall length of
2543 the collapsed block). As a compromise, don't apply this optimization on
2544 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2545 The best threshold depends on the latencies of the instructions (i.e.,
2546 the branch penalty). */
2547 if (optimize > 1 && blk_size > 2)
2548 return insn;
2549
2550 /* At this point, we've found the start of block 3 and we know that
2551 it is the destination of the branch from block 1. Also, all
2552 instructions in the block 2 are conditionalizable. So, apply the
2553 conditionalization and delete the branch. */
2554 start_blk_3_lab = insn;
2555
2556 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2557 insn = NEXT_INSN (insn))
2558 {
b32d5189 2559 rtx_insn *newinsn;
8f90be4c 2560
4654c0cf 2561 if (insn->deleted ())
2562 continue;
2563
14bc6742 2564 /* Try to form a conditional variant of the instruction and emit it. */
2565 if ((newinsn = emit_new_cond_insn (insn, cond)))
2566 {
2567 if (end_blk_2_insn == insn)
2568 end_blk_2_insn = newinsn;
2569
2570 insn = newinsn;
2571 }
2572 }
2573
2574 /* Note whether we will delete the label starting blk 3 when the jump
2575 gets deleted. If so, we want to re-apply this optimization at the
2576 last real instruction right before the label. */
2577 if (LABEL_NUSES (start_blk_3_lab) == 1)
2578 {
2579 start_blk_3_lab = 0;
2580 }
2581
2582 /* ??? We probably should redistribute the death notes for this insn, esp.
2583 the death of cc, but it doesn't really matter this late in the game.
2584 The peepholes all use is_dead() which will find the correct death
2585 regardless of whether there is a note. */
2586 delete_insn (end_blk_1_br);
2587
2588 if (! start_blk_3_lab)
2589 return end_blk_2_insn;
2590
4816b8e4 2591 /* Return the insn right after the label at the start of block 3. */
2592 return NEXT_INSN (start_blk_3_lab);
2593}
2594
2595/* Apply the conditionalization of blocks optimization. This is the
2596 outer loop that traverses through the insns scanning for a branch
2597 that signifies an opportunity to apply the optimization. Note that
2598 this optimization is applied late. If we could apply it earlier,
2599 say before cse 2, it may expose more optimization opportunities.
2600 But the payback probably isn't really worth the effort (we'd have
2601 to update all reg/flow/notes/links/etc to make it work - and stick it
2602 in before cse 2). */
2603
8f90be4c 2604static void
08903e08 2605conditionalize_optimization (void)
8f90be4c 2606{
b32d5189 2607 rtx_insn *insn;
8f90be4c 2608
18dbd950 2609 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2610 continue;
2611}
2612
18dbd950 2613/* This is to handle loads from the constant pool. */
4816b8e4 2614
18dbd950 2615static void
08903e08 2616mcore_reorg (void)
2617{
2618 /* Reset this variable. */
2619 current_function_anonymous_args = 0;
2620
2621 if (optimize == 0)
2622 return;
2623
2624 /* Conditionalize blocks where we can. */
18dbd950 2625 conditionalize_optimization ();
2626
2627 /* Literal pool generation is now pushed off until the assembler. */
2628}
2629
2630\f
f0f4da32 2631/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2632
f0f4da32 2633bool
08903e08 2634mcore_r15_operand_p (rtx x)
2635{
2636 switch (GET_CODE (x))
2637 {
2638 case CONST_INT:
2639 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2640
2641 case REG:
2642 case SUBREG:
2643 case MEM:
2644 return true;
2645
2646 default:
2647 return false;
2648 }
2649}
2650
0a2aaacc 2651/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2652 directly move X into it, use r1-r14 as a temporary. */
08903e08 2653
f0f4da32 2654enum reg_class
0a2aaacc 2655mcore_secondary_reload_class (enum reg_class rclass,
ef4bddc2 2656 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2657{
0a2aaacc 2658 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2659 && !mcore_r15_operand_p (x))
2660 return LRW_REGS;
2661 return NO_REGS;
2662}
8f90be4c 2663
f0f4da32 2664/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2665 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2666 use LRW_REGS instead. */
08903e08 2667
8f90be4c 2668enum reg_class
0a2aaacc 2669mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2670{
0a2aaacc 2671 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2672 return LRW_REGS;
8f90be4c 2673
0a2aaacc 2674 return rclass;
2675}
2676
2677/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2678 register. Note that the current version doesn't worry about whether
2679 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2680 in r2, which matches an SImode in r2). We might think in the future about whether
2681 we want to be able to say something about modes. */
08903e08 2682
8f90be4c 2683int
08903e08 2684mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2685{
14bc6742 2686 /* Strip any and all of the subreg wrappers. */
2687 while (GET_CODE (x) == SUBREG)
2688 x = SUBREG_REG (x);
2689
2690 while (GET_CODE (y) == SUBREG)
2691 y = SUBREG_REG (y);
2692
2693 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2694 return 1;
2695
2696 return 0;
2697}
2698
2699static void
2700mcore_option_override (void)
8f90be4c 2701{
2702 /* Only the m340 supports little endian code. */
2703 if (TARGET_LITTLE_END && ! TARGET_M340)
78fb8038 2704 target_flags |= MASK_M340;
8f90be4c 2705}
fac0f722 2706
8f90be4c 2707\f
2708/* Compute the number of word sized registers needed to
2709 hold a function argument of mode MODE and type TYPE. */
08903e08 2710
8f90be4c 2711int
ef4bddc2 2712mcore_num_arg_regs (machine_mode mode, const_tree type)
2713{
2714 int size;
2715
2716 function_arg_info arg (const_cast<tree> (type), mode, /*named=*/true);
2717 if (targetm.calls.must_pass_in_stack (arg))
2718 return 0;
2719
2720 if (type && mode == BLKmode)
2721 size = int_size_in_bytes (type);
2722 else
2723 size = GET_MODE_SIZE (mode);
2724
2725 return ROUND_ADVANCE (size);
2726}
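/* A worked example (values assumed for illustration): with
   UNITS_PER_WORD == 4, a 9-byte BLKmode structure needs
   ROUND_ADVANCE (9) == 3 argument registers, an SImode scalar needs 1,
   and anything that must_pass_in_stack needs none.  */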
2727
2728static rtx
ef4bddc2 2729handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2730{
2731 int size;
2732
696e78bf 2733 /* The MCore ABI defines that a structure whose size is not a whole multiple
2734 of bytes is passed packed into registers (or spilled onto the stack if
2735 not enough registers are available) with the last few bytes of the
2736 structure being packed, left-justified, into the last register/stack slot.
2737 GCC handles this correctly if the last word is in a stack slot, but we
2738 have to generate a special, PARALLEL RTX if the last word is in an
2739 argument register. */
2740 if (type
2741 && TYPE_MODE (type) == BLKmode
2742 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2743 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2744 && (size % UNITS_PER_WORD != 0)
2745 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2746 {
2747 rtx arg_regs [NPARM_REGS];
2748 int nregs;
2749 rtx result;
2750 rtvec rtvec;
2751
2752 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2753 {
2754 arg_regs [nregs] =
2755 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2756 GEN_INT (nregs * UNITS_PER_WORD));
2757 nregs ++;
2758 }
2759
2760 /* We assume here that NPARM_REGS == 6. The assert checks this. */
819bfe0e 2761 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2762 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2763 arg_regs[3], arg_regs[4], arg_regs[5]);
2764
2765 result = gen_rtx_PARALLEL (mode, rtvec);
2766 return result;
2767 }
2768
2769 return gen_rtx_REG (mode, reg);
2770}
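/* For illustration (sizes assumed): a 6-byte BLKmode structure starting
   in the first argument register yields a PARALLEL of two EXPR_LIST
   entries, (reg:SI 2) at byte offset 0 and (reg:SI 3) at byte offset 4,
   so the trailing two bytes arrive left-justified in the second
   register, as the ABI comment above requires.  */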
2771
2772rtx
cde0f3fd 2773mcore_function_value (const_tree valtype, const_tree func)
8f90be4c 2774{
ef4bddc2 2775 machine_mode mode;
2776 int unsigned_p;
2777
2778 mode = TYPE_MODE (valtype);
2779
cde0f3fd 2780 /* Since we promote return types, we must promote the mode here too. */
71e0af3c 2781 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2782
2783 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2784}
2785
2786/* Define where to put the arguments to a function.
2787 Value is zero to push the argument on the stack,
2788 or a hard register in which to store the argument.
2789
2790 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2791 the preceding args and about the function being called.
6783fdb7 2792 ARG is a description of the argument.
2793
2794 On MCore the first args are normally in registers
2795 and the rest are pushed. Any arg that starts within the first
2796 NPARM_REGS words is at least partially passed in a register unless
2797 its data type forbids. */
08903e08 2798
4665ac17 2799static rtx
6783fdb7 2800mcore_function_arg (cumulative_args_t cum, const function_arg_info &arg)
2801{
2802 int arg_reg;
2803
6783fdb7 2804 if (!arg.named || arg.end_marker_p ())
2805 return 0;
2806
0ffef200 2807 if (targetm.calls.must_pass_in_stack (arg))
2808 return 0;
2809
6783fdb7 2810 arg_reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
2811
2812 if (arg_reg < NPARM_REGS)
2813 return handle_structs_in_regs (arg.mode, arg.type,
2814 FIRST_PARM_REG + arg_reg);
2815
2816 return 0;
2817}
2818
4665ac17 2819static void
2820mcore_function_arg_advance (cumulative_args_t cum_v,
2821 const function_arg_info &arg)
4665ac17 2822{
2823 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2824
2825 *cum = (ROUND_REG (*cum, arg.mode)
2826 + (int) arg.named * mcore_num_arg_regs (arg.mode, arg.type));
2827}
2828
c2ed6cf8 2829static unsigned int
ef4bddc2 2830mcore_function_arg_boundary (machine_mode mode,
2831 const_tree type ATTRIBUTE_UNUSED)
2832{
2833 /* Doubles must be aligned to an 8 byte boundary. */
2834 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2835 ? BIGGEST_ALIGNMENT
2836 : PARM_BOUNDARY);
2837}
2838
78a52f11 2839/* Returns the number of bytes of argument registers required to hold *part*
2840 of argument ARG. If the argument fits entirely in the argument registers,
2841 or entirely on the stack, then 0 is returned. CUM is the number of
2842 argument registers already used by earlier parameters to the function. */
08903e08 2843
78a52f11 2844static int
a7c81bc1 2845mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
8f90be4c 2846{
a7c81bc1 2847 int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
8f90be4c 2848
a7c81bc1 2849 if (!arg.named)
2850 return 0;
2851
0ffef200 2852 if (targetm.calls.must_pass_in_stack (arg))
2853 return 0;
2854
2855 /* REG is not the *hardware* register number of the register that holds
2856 the argument, it is the *argument* register number. So for example,
2857 the first argument to a function goes in argument register 0, which
2858 translates (for the MCore) into hardware register 2. The second
2859 argument goes into argument register 1, which translates into hardware
2860 register 3, and so on. NPARM_REGS is the number of argument registers
2861 supported by the target, not the maximum hardware register number of
2862 the target. */
2863 if (reg >= NPARM_REGS)
2864 return 0;
2865
2866 /* If the argument fits entirely in registers, return 0. */
a7c81bc1 2867 if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
2868 return 0;
2869
2870 /* The argument overflows the number of available argument registers.
2871 Compute how many argument registers have not yet been assigned to
2872 hold an argument. */
2873 reg = NPARM_REGS - reg;
2874
2875 /* Return partially in registers and partially on the stack. */
78a52f11 2876 return reg * UNITS_PER_WORD;
2877}
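/* A worked example (values assumed for illustration): with
   NPARM_REGS == 6, an argument needing 3 registers that starts at
   argument register 5 overflows by two words; one argument register
   remains, so 1 * UNITS_PER_WORD == 4 bytes are passed in a register
   and the rest goes on the stack.  */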
2878\f
a0ab749a 2879/* Return nonzero if SYMBOL is marked as being dllexport'd. */
08903e08 2880
8f90be4c 2881int
08903e08 2882mcore_dllexport_name_p (const char * symbol)
2883{
2884 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2885}
2886
a0ab749a 2887/* Return nonzero if SYMBOL is marked as being dllimport'd. */
08903e08 2888
8f90be4c 2889int
08903e08 2890mcore_dllimport_name_p (const char * symbol)
2891{
2892 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2893}
2894
2895/* Mark a DECL as being dllexport'd. */
08903e08 2896
8f90be4c 2897static void
08903e08 2898mcore_mark_dllexport (tree decl)
8f90be4c 2899{
cbd3488b 2900 const char * oldname;
2901 char * newname;
2902 rtx rtlname;
2903 tree idp;
2904
2905 rtlname = XEXP (DECL_RTL (decl), 0);
2906
2907 if (GET_CODE (rtlname) == MEM)
2908 rtlname = XEXP (rtlname, 0);
2909 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2910 oldname = XSTR (rtlname, 0);
2911
2912 if (mcore_dllexport_name_p (oldname))
2913 return; /* Already done. */
2914
5ead67f6 2915 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2916 sprintf (newname, "@e.%s", oldname);
2917
2918 /* We pass newname through get_identifier to ensure it has a unique
2919 address. RTL processing can sometimes peek inside the symbol ref
2920 and compare the string's addresses to see if two symbols are
2921 identical. */
2922 /* ??? At least I think that's why we do this. */
2923 idp = get_identifier (newname);
2924
2925 XEXP (DECL_RTL (decl), 0) =
f1c25d3b 2926 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2927}
2928
2929/* Mark a DECL as being dllimport'd. */
08903e08 2930
8f90be4c 2931static void
08903e08 2932mcore_mark_dllimport (tree decl)
8f90be4c 2933{
cbd3488b 2934 const char * oldname;
2935 char * newname;
2936 tree idp;
2937 rtx rtlname;
2938 rtx newrtl;
2939
2940 rtlname = XEXP (DECL_RTL (decl), 0);
2941
2942 if (GET_CODE (rtlname) == MEM)
2943 rtlname = XEXP (rtlname, 0);
2944 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2945 oldname = XSTR (rtlname, 0);
8f90be4c 2946
2947 gcc_assert (!mcore_dllexport_name_p (oldname));
2948 if (mcore_dllimport_name_p (oldname))
2949 return; /* Already done. */
2950
2951 /* ??? One can well ask why we're making these checks here,
2952 and that would be a good question. */
2953
2954 /* Imported variables can't be initialized. */
2955 if (TREE_CODE (decl) == VAR_DECL
2956 && !DECL_VIRTUAL_P (decl)
2957 && DECL_INITIAL (decl))
2958 {
dee15844 2959 error ("initialized variable %q+D is marked dllimport", decl);
2960 return;
2961 }
2962
2963 /* `extern' needn't be specified with dllimport.
2964 Specify `extern' now and hope for the best. Sigh. */
2965 if (TREE_CODE (decl) == VAR_DECL
2966 /* ??? Is this test for vtables needed? */
2967 && !DECL_VIRTUAL_P (decl))
2968 {
2969 DECL_EXTERNAL (decl) = 1;
2970 TREE_PUBLIC (decl) = 1;
2971 }
2972
5ead67f6 2973 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2974 sprintf (newname, "@i.__imp_%s", oldname);
2975
2976 /* We pass newname through get_identifier to ensure it has a unique
2977 address. RTL processing can sometimes peek inside the symbol ref
2978 and compare the string's addresses to see if two symbols are
2979 identical. */
2980 /* ??? At least I think that's why we do this. */
2981 idp = get_identifier (newname);
2982
2983 newrtl = gen_rtx_MEM (Pmode,
2984 gen_rtx_SYMBOL_REF (Pmode,
2985 IDENTIFIER_POINTER (idp)));
2986 XEXP (DECL_RTL (decl), 0) = newrtl;
2987}
2988
2989static int
08903e08 2990mcore_dllexport_p (tree decl)
2991{
2992 if ( TREE_CODE (decl) != VAR_DECL
2993 && TREE_CODE (decl) != FUNCTION_DECL)
2994 return 0;
2995
91d231cb 2996 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2997}
2998
2999static int
08903e08 3000mcore_dllimport_p (tree decl)
3001{
3002 if ( TREE_CODE (decl) != VAR_DECL
3003 && TREE_CODE (decl) != FUNCTION_DECL)
3004 return 0;
3005
91d231cb 3006 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3007}
3008
fb49053f 3009/* We must mark dll symbols specially. Definitions of dllexport'd objects
14bc6742 3010 install some info in the .drective (PE) or .exports (ELF) sections. */
3011
3012static void
08903e08 3013mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
8f90be4c 3014{
3015 /* Mark the decl so we can tell from the rtl whether the object is
3016 dllexport'd or dllimport'd. */
3017 if (mcore_dllexport_p (decl))
3018 mcore_mark_dllexport (decl);
3019 else if (mcore_dllimport_p (decl))
3020 mcore_mark_dllimport (decl);
3021
3022 /* It might be that DECL has already been marked as dllimport, but
3023 a subsequent definition nullified that. The attribute is gone
3024 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3025 else if ((TREE_CODE (decl) == FUNCTION_DECL
3026 || TREE_CODE (decl) == VAR_DECL)
3027 && DECL_RTL (decl) != NULL_RTX
3028 && GET_CODE (DECL_RTL (decl)) == MEM
3029 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3030 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3031 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3032 {
3cce094d 3033 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
8f90be4c 3034 tree idp = get_identifier (oldname + 9);
f1c25d3b 3035 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3036
3037 XEXP (DECL_RTL (decl), 0) = newrtl;
3038
3039 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3040 ??? We leave these alone for now. */
3041 }
3042}
3043
3044/* Undo the effects of the above. */
3045
3046static const char *
08903e08 3047mcore_strip_name_encoding (const char * str)
3048{
3049 return str + (str[0] == '@' ? 3 : 0);
3050}
3051
3052/* MCore specific attribute support.
3053 dllexport - for exporting a function/variable that will live in a dll
3054 dllimport - for importing a function/variable from a dll
3055 naked - do not create a function prologue/epilogue. */
8f90be4c 3056
3057/* Handle a "naked" attribute; arguments as in
3058 struct attribute_spec.handler. */
08903e08 3059
91d231cb 3060static tree
3061mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3062 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
91d231cb 3063{
d45eae79 3064 if (TREE_CODE (*node) != FUNCTION_DECL)
91d231cb 3065 {
3066 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3067 name);
91d231cb 3068 *no_add_attrs = true;
3069 }
3070
91d231cb 3071 return NULL_TREE;
3072}
3073
3074/* ??? It looks like this is PE specific? Oh well, this is what the
3075 old code did as well. */
8f90be4c 3076
ae46c4e0 3077static void
08903e08 3078mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3079{
3080 int len;
0139adca 3081 const char * name;
8f90be4c 3082 char * string;
f27cd94d 3083 const char * prefix;
3084
3085 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3086
3087 /* Strip off any encoding in name. */
772c5265 3088 name = (* targetm.strip_name_encoding) (name);
3089
3090 /* The object is put in, for example, section .text$foo.
3091 The linker will then ultimately place them in .text
3092 (everything from the $ on is stripped). */
3093 if (TREE_CODE (decl) == FUNCTION_DECL)
3094 prefix = ".text$";
f710504c 3095 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3096 section might have relocs against it. */
4e4d733e 3097 else if (decl_readonly_section (decl, 0))
3098 prefix = ".rdata$";
3099 else
3100 prefix = ".data$";
3101
3102 len = strlen (name) + strlen (prefix);
5ead67f6 3103 string = XALLOCAVEC (char, len + 1);
3104
3105 sprintf (string, "%s%s", prefix, name);
3106
f961457f 3107 set_decl_section_name (decl, string);
3108}
3109
3110int
08903e08 3111mcore_naked_function_p (void)
8f90be4c 3112{
91d231cb 3113 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3114}
7c262518 3115
3116static bool
3117mcore_warn_func_return (tree decl)
3118{
3119 /* Naked functions are implemented entirely in assembly, including the
3120 return sequence, so suppress warnings about this. */
3121 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3122}
3123
ede75ee8 3124#ifdef OBJECT_FORMAT_ELF
7c262518 3125static void
3126mcore_asm_named_section (const char *name,
3127 unsigned int flags ATTRIBUTE_UNUSED,
3128 tree decl ATTRIBUTE_UNUSED)
3129{
3130 fprintf (asm_out_file, "\t.section %s\n", name);
3131}
ede75ee8 3132#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3133
3134/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3135
3136static void
3137mcore_external_libcall (rtx fun)
3138{
3139 fprintf (asm_out_file, "\t.import\t");
3140 assemble_name (asm_out_file, XSTR (fun, 0));
3141 fprintf (asm_out_file, "\n");
3142}
3143
3144/* Worker function for TARGET_RETURN_IN_MEMORY. */
3145
09a2b93a 3146static bool
586de218 3147mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3148{
586de218 3149 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3150 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3151}
3152
3153/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3154 Output assembler code for a block containing the constant parts
3155 of a trampoline, leaving space for the variable parts.
3156
3157 On the MCore, the trampoline looks like:
3158 lrw r1, function
3159 lrw r13, area
3160 jmp r13
3161 or r0, r0
3162 .literals */
3163
3164static void
3165mcore_asm_trampoline_template (FILE *f)
3166{
3167 fprintf (f, "\t.short 0x7102\n");
3168 fprintf (f, "\t.short 0x7d02\n");
3169 fprintf (f, "\t.short 0x00cd\n");
3170 fprintf (f, "\t.short 0x1e00\n");
3171 fprintf (f, "\t.long 0\n");
3172 fprintf (f, "\t.long 0\n");
3173}
3174
3175/* Worker function for TARGET_TRAMPOLINE_INIT. */
3176
3177static void
3178mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3179{
3180 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3181 rtx mem;
3182
3183 emit_block_move (m_tramp, assemble_trampoline_template (),
3184 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3185
3186 mem = adjust_address (m_tramp, SImode, 8);
3187 emit_move_insn (mem, chain_value);
3188 mem = adjust_address (m_tramp, SImode, 12);
3189 emit_move_insn (mem, fnaddr);
3190}
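/* For illustration: the template above is four 16-bit opcodes (8 bytes,
   i.e. 2*UNITS_PER_WORD) followed by two literal slots, so the init
   routine copies the 8 code bytes and then plants the static chain at
   offset 8 and the target function's address at offset 12.  */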
3191
3192/* Implement TARGET_LEGITIMATE_CONSTANT_P
3193
3194 On the MCore, allow anything but a double. */
3195
3196static bool
ef4bddc2 3197mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3198{
3199 return GET_CODE (x) != CONST_DOUBLE;
3200}
3201
3202/* Helper function for `mcore_legitimate_address_p'. */
3203
3204static bool
3205mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3206{
3207 if (strict_p)
3208 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3209 else
3210 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3211}
3212
3213static bool
3214mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3215{
3216 return REG_P(x) && mcore_reg_ok_for_base_p (x, strict_p);
3217}
3218
3219/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3220 and for DI is 0..56 because we use two SI loads, etc. */
3221
3222static bool
3223mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3224{
3225 if (CONST_INT_P (op))
3226 {
3227 if (GET_MODE_SIZE (mode) >= 4
3228 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3229 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3230 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3231 return true;
3232 if (GET_MODE_SIZE (mode) == 2
3233 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3234 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3235 return true;
3236 if (GET_MODE_SIZE (mode) == 1
3237 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3238 return true;
3239 }
3240 return false;
3241}
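/* Worked examples of the index ranges above: SImode (size 4) accepts
   word-aligned offsets 0..60 (64 - 4); DImode (size 8) stops at 56
   (64 - 8) so that both SImode halves stay in range; HImode accepts
   even offsets up to 30 and QImode any offset up to 15.  */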
3242
3243
3244/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3245
3246 Allow REG
3247 REG + disp */
3248
3249static bool
3250mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3251 addr_space_t as)
3252{
3253 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3254
3255 if (mcore_base_register_rtx_p (x, strict_p))
3256 return true;
3257 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3258 {
3259 rtx xop0 = XEXP (x, 0);
3260 rtx xop1 = XEXP (x, 1);
3261 if (mcore_base_register_rtx_p (xop0, strict_p)
3262 && mcore_legitimate_index_p (mode, xop1))
3263 return true;
3264 if (mcore_base_register_rtx_p (xop1, strict_p)
3265 && mcore_legitimate_index_p (mode, xop0))
3266 return true;
3267 }
3268
3269 return false;
3270}
3271
3272/* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
3273 even registers. */
3274
3275static bool
3276mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3277{
3278 if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
3279 return (regno & 1) == 0;
3280
3281 return regno < 18;
3282}
3283
3284/* Implement TARGET_MODES_TIEABLE_P. */
3285
3286static bool
3287mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3288{
3289 return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
3290}