/* Output routines for Motorola MCore processor
   Copyright (C) 1993-2017 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "mcore.h"
#include "output.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "builtins.h"
#include "regs.h"

/* This file should be included last.  */
#include "target-def.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

struct mcore_frame
{
  int arg_size;            /* Stdarg spills (bytes).  */
  int reg_size;            /* Non-volatile reg saves (bytes).  */
  int reg_mask;            /* Non-volatile reg saves.  */
  int local_size;          /* Locals.  */
  int outbound_size;       /* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4  /* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        try_constant_tricks          (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *output_inline_const         (machine_mode, rtx *);
static void       layout_mcore_frame           (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
static cond_type  is_cond_candidate            (rtx);
static rtx_insn  *emit_new_cond_insn           (rtx_insn *, int);
static rtx_insn  *conditionalize_block         (rtx_insn *);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (machine_mode, const_tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *,
                                                unsigned int, tree);
#endif
static void       mcore_print_operand          (FILE *, rtx, int);
static void       mcore_print_operand_address  (FILE *, machine_mode, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char *mcore_strip_name_encoding   (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, machine_mode, int, int,
                                                int *, bool);
static void       mcore_external_libcall       (rtx);
static bool       mcore_return_in_memory       (const_tree, const_tree);
static int        mcore_arg_partial_bytes      (cumulative_args_t,
                                                machine_mode,
                                                tree, bool);
static rtx        mcore_function_arg           (cumulative_args_t,
                                                machine_mode,
                                                const_tree, bool);
static void       mcore_function_arg_advance   (cumulative_args_t,
                                                machine_mode,
                                                const_tree, bool);
static unsigned int mcore_function_arg_boundary (machine_mode,
                                                 const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init        (rtx, tree, rtx);
static bool       mcore_warn_func_return       (tree);
static void       mcore_option_override        (void);
static bool       mcore_legitimate_constant_p  (machine_mode, rtx);
static bool       mcore_legitimate_address_p   (machine_mode, rtx, bool,
                                                addr_space_t);
static bool       mcore_hard_regno_mode_ok     (unsigned int, machine_mode);
static bool       mcore_modes_tieable_p        (machine_mode, machine_mode);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "dllexport", 0, 0, true,  false, false, false, NULL, NULL },
  { "dllimport", 0, 0, true,  false, false, false, NULL, NULL },
  { "naked",     0, 0, true,  false, false, false,
    mcore_handle_naked_attribute, NULL },
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL  mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES  merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND  mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS  mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P  mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE  mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION  mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION  default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO  mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING  mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS  mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST  hook_int_rtx_mode_as_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE  default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES  hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY  mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK  must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES  mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG  mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE  mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY  mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS  mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE  mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT  mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE  mcore_option_override

#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P  mcore_legitimate_constant_p
#undef  TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P  mcore_legitimate_address_p

#undef  TARGET_LRA_P
#define TARGET_LRA_P  hook_bool_void_false

#undef  TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN  mcore_warn_func_return

#undef  TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK  mcore_hard_regno_mode_ok

#undef  TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P  mcore_modes_tieable_p

#undef  TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT  constant_alignment_word_strings

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by SIZE bytes in the given DIRECTION (negative
   extends the stack, positive shrinks it).  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
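
/* For example, with mcore_stack_increment == 64 a 200-byte extension is
   emitted as three probed 64-byte steps followed by a final unprobed
   8-byte subtraction, so each chunk of newly extended stack is touched
   as it is allocated.  */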

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

static void
mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO (base)], INTVAL (index));
            break;

          default:
            gcc_unreachable ();
          }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
          || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'X' print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          gcc_unreachable ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (GET_MODE (x), XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost?  We do this because immediates may
   have been relaxed: we want to ensure that cse will cse relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost?  See mcore_and_cost ().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
                 int opno ATTRIBUTE_UNUSED,
                 int * total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
        {
        case GTU:
          /* Unsigned > 0 is the same as != 0; everything else is converted
             below to LEU (reversed cmphs).  */
          if (val == 0)
            code = NE;
          break;

        /* Check whether (LE A imm) can become (LT A imm + 1),
           or (GT A imm) can become (GE A imm + 1).  */
        case GT:
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              op1 = GEN_INT (val + 1);
              code = code == LE ? LT : GE;
            }
          break;

        default:
          break;
        }
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:   /* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* FALLTHRU */

    case NE:   /* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:   /* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* FALLTHRU */

    case GT:   /* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:   /* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* FALLTHRU */

    case LT:   /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
          /* Covered by btsti x,31.  */
          && INTVAL (op1) != 0
          && ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:  /* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* FALLTHRU */

    case LEU:  /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:  /* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* FALLTHRU */

    case GEU:  /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
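
/* For example, (eq r2 5) is rewritten as the NE test "cmpnei r2,5" with
   INVERT returned as true, so the caller's branch or setcc must use the
   opposite sense of the condition bit.  */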

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands[index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);
          gcc_assert (GET_CODE (addr) == SYMBOL_REF);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
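
/* E.g. 100 (movi), 4096 == 1 << 12 (bgeni) and 255 == (1 << 8) - 1
   (bmaski) are all single-instruction constants, while 1000 is none of
   these and so cannot be loaded in one instruction.  */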

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number on
   success (0 means it is not inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;  /* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
        {
          *x = value - i;
          *y = i;

          return 3;
        }

      if (const_ok_for_mcore (value + i))
        {
          *x = value + i;
          *y = i;

          return 4;
        }
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
        {
          *x = i - value;
          *y = i;

          return 5;
        }

      if (const_ok_for_mcore (value & ~bit))
        {
          *y = bit;
          *x = value & ~bit;
          return 6;
        }

      if (const_ok_for_mcore (value | bit))
        {
          *y = ~bit;
          *x = value | bit;

          return 7;
        }

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;   /* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        {
          *y = i;
          *x = rot;

          return 8;
        }

      if (shf & 1)
        shf = 0;  /* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
        {
          *y = i;
          *x = shf;

          return 9;
        }
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
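
/* For example, 0xFFFFFF80 (-128) is trick 2: a movi loads 127 and a not
   inverts it.  384 is trick 6: bgeni loads the power of two 128, then
   bseti sets bit 8 to add 256.  */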

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
        return 0;  /* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (NONJUMP_INSN_P (insn))
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
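
/* Each step above doubles the width of the partial counts: bit pairs,
   then nibbles, bytes and halfwords; the final line adds the two halfword
   counts and masks the total to eight bits.
   E.g. mcore_num_ones (0xF0F0F0F0) == 16.  */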

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns, since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
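
/* E.g. an AND with the constant 0xFFFFFF00 becomes "movi %0,255" plus
   "andn %2,%0": the complemented mask is cheap to load, and andn applies
   it without touching the literal pool.  */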

/* Output an inline constant.  */

static const char *
output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks ().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r */
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case E_SImode:
                return "ldw\t%0,%1";
              case E_HImode:
                return "ld.h\t%0,%1";
              case E_QImode:
                return "ld.b\t%0,%1";
              default:
                gcc_unreachable ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          HOST_WIDE_INT x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))  /* R-P */
            return output_inline_const (SImode, operands);  /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";  /* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";               /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case E_SImode:
        return "stw\t%1,%0";
      case E_HImode:
        return "st.h\t%1,%0";
      case E_QImode:
        return "st.b\t%1,%0";
      default:
        gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov %0,%1";
          else
            return "mov %0,%1\n\tmov %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %R0,32";
              else
                return "movi %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski %0,32";
              else
                return "movi %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}
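
/* E.g. copying the pair r2/r3 into r3/r4 moves the second word first
   ("mov r4,r3" before "mov r3,r2") so the overlapping register r3 is
   read before it is overwritten.  */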

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (operands[0],
                                  gen_rtx_AND (SImode, operands[0],
                                               GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (operands[0],
                                  gen_rtx_IOR (SImode, operands[0],
                                               GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;    /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
                                                  GEN_INT (posn))));

  emit_insn (gen_rtx_SET (operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
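
/* E.g. storing the constant 1 into a one-bit field at position 5 takes
   the width == 1 shortcut above and emits (ior:SI reg 0x20), which a
   single bseti can implement.  */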

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}
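
/* E.g. a 6-byte copy with 4-byte alignment becomes an SImode load/store
   pair for the first four bytes and an HImode pair for the last two,
   with each chunk's load emitted before the previous chunk's store.  */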

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)  /* Maximum displace of word store + 4.  */
#define ADDI_REACH  (32)  /* Maximum addi operand.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;  /* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      gcc_assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
 finish:
  gcc_assert (infp->reg_offset >= 0);
  gcc_assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}

/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap.  */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp.  */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  gcc_unreachable ();
}
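
/* A worked example of the offsets above (a sketch): with local_size = 8,
   pad_local = 0, reg_size = 8 and pad_reg = 0, the argument pointer sits
   16 bytes above the frame pointer.  If outbound_size plus pad_outbound
   is another 16 bytes, the frame pointer sits 16 bytes above the stack
   pointer and the argument pointer 32 bytes above it.  */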

/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
                              machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);

  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (Pmode, stack_pointer_rtx,
                                                  offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (Pmode, stack_pointer_rtx,
                                                      offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
          growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust (1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          emit_insn (gen_movsi
                     (gen_rtx_REG (SImode, i),
                      gen_rtx_MEM (SImode,
                                   plus_constant (Pmode, stack_pointer_rtx,
                                                  offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust (1, fi.growth[growth--]);
}
\f
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register; constants have
   to come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw   L1,r0
   ..
   lrw   L3,r0
   bra   L4
   align
   L3:.long value
   L4:.long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;	/* Value in table.  */
  rtx label;	/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
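
/* A quick capacity check: with MAX_COUNT at 1016 bytes and entries of at
   least 4 bytes, MAX_POOL_SIZE works out to 254 constants per pool, which
   keeps every entry within the 0...1020 byte pc-relative reach described
   above.  */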

/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */

const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
        {
          pool_node * p = pool_vector + i;

          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

          output_asm_insn (".long %0", &p->value);
        }

      pool_size = 0;
    }

  return "";
}

/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
        return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
           GET_CODE (dst) != SUBREG) ||
          GET_MODE (dst) != SImode)
        return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
           (GET_CODE (src) == SUBREG &&
            GET_CODE (SUBREG_REG (src)) == REG)) &&
          GET_MODE (src) == SImode)
        return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
               INTVAL (src) == 0)
        return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode &&
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
               INTVAL (XEXP (src, 1)) == 1)
        return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == 1) ||
                (GET_CODE (src) == PLUS &&
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
                 INTVAL (XEXP (src, 1)) == -1)) &&
               (GET_CODE (XEXP (src, 0)) == REG ||
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
               GET_MODE (XEXP (src, 0)) == SImode)
        return COND_DEC_INSN;

      /* Some insns that we don't bother with:
         (set (rx:DI) (ry:DI))
         (set (rx:DI) (const_int 0))
      */
    }
  else if (JUMP_P (insn)
           && GET_CODE (PATTERN (insn)) == SET
           && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
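
/* A few illustrative patterns and how the classifier above treats them
   (a sketch; the register numbers are arbitrary):

     (set (reg:SI 7) (reg:SI 8))                           COND_MOV_INSN
     (set (reg:SI 7) (const_int 0))                        COND_CLR_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int 1)))   COND_INC_INSN
     (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int -1)))  COND_DEC_INSN

   DImode variants of the same operations fall through to COND_NO, as
   noted in the comment above.  */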

/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx_insn *
emit_new_cond_insn (rtx_insn *insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (NONJUMP_INSN_P (insn))
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
        c_insn = gen_movt0 (dst, src, dst);
      else
        c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
        c_insn = gen_incscc (dst, dst);
      else
        c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
        c_insn = gen_decscc (dst, dst);
      else
        c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
        c_insn = gen_branch_true (dst);
      else
        c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
         point, but go ahead and save the notes.  This will help is_dead()
         when applying peepholes (links don't matter since they are not
         used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
         deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return as_a <rtx_insn *> (c_insn);
}

/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insns in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

   bt    L2        <-- end of block 1 (delete)
   mov   r7,r8
   addu  r7,1
   br    L3        <-- end of block 2

   L2:   ...       <-- start of block 3 (NUSES==1)
   L3:   ...

   to:

   movf  r7,r8
   incf  r7
   bf    L3

   L3:   ...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC  */

static rtx_insn *
conditionalize_block (rtx_insn *first)
{
  rtx_insn *insn;
  rtx br_pat;
  rtx_insn *end_blk_1_br = 0;
  rtx_insn *end_blk_2_insn = 0;
  rtx_insn *start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (! JUMP_P (first)
      || GET_CODE (PATTERN (first)) != SET
      || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
         and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
        break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
         insn is not conditionalizable or makes this optimization fail,
         just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
        return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
        {
          blk_size ++;
          end_blk_2_insn = insn;
        }
    }

  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx_insn *newinsn;

      if (insn->deleted ())
        continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
        {
          if (end_blk_2_insn == insn)
            end_blk_2_insn = newinsn;

          insn = newinsn;
        }
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? We probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}

/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it may expose more optimization opportunities,
   but the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc to make it work - and stick it
   in before cse 2).  */

static void
conditionalize_optimization (void)
{
  rtx_insn *insn;

  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
    continue;
}

/* This is to handle loads from the constant pool.  */

static void
mcore_reorg (void)
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization ();

  /* Literal pool generation is now pushed off until the assembler.  */
}

\f
/* Return true if X is something that can be moved directly into r15.  */

bool
mcore_r15_operand_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return mcore_const_ok_for_inline (INTVAL (x));

    case REG:
    case SUBREG:
    case MEM:
      return 1;

    default:
      return 0;
    }
}

/* Implement SECONDARY_RELOAD_CLASS.  If RCLASS contains r15, and we can't
   directly move X into it, use r1-r14 as a temporary.  */

enum reg_class
mcore_secondary_reload_class (enum reg_class rclass,
                              machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
      && !mcore_r15_operand_p (x))
    return LRW_REGS;
  return NO_REGS;
}

/* Return the reg_class to use when reloading the rtx X into the class
   RCLASS.  If X is too complex to move directly into r15, prefer to
   use LRW_REGS instead.  */

enum reg_class
mcore_reload_class (rtx x, enum reg_class rclass)
{
  if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return rclass;
}

/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2).  Might think in the future about
   whether we want to be able to say something about modes.  */

int
mcore_is_same_reg (rtx x, rtx y)
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
    return 1;

  return 0;
}
static void
mcore_option_override (void)
{
  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}

\f
/* Compute the number of word-sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */

int
mcore_num_arg_regs (machine_mode mode, const_tree type)
{
  int size;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}

static rtx
handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs[NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
        {
          arg_regs[nregs] =
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg++),
                               GEN_INT (nregs * UNITS_PER_WORD));
          nregs++;
        }

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      gcc_assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
                         arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
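
/* A worked example of the PARALLEL case above (a sketch): a 10 byte
   BLKmode structure starting in the first argument register occupies
   three registers.  Bytes 0-3 and 4-7 fill the first two, and the
   trailing two bytes are packed, left-justified, into the third, so the
   PARALLEL holds three SImode EXPR_LISTs at offsets 0, 4 and 8.  */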

rtx
mcore_function_value (const_tree valtype, const_tree func)
{
  machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  /* Since we promote return types, we must promote the mode here too.  */
  mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

static rtx
mcore_function_arg (cumulative_args_t cum, machine_mode mode,
                    const_tree type, bool named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}

static void
mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                            const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum = (ROUND_REG (*cum, mode)
          + (int)named * mcore_num_arg_regs (mode, type));
}

static unsigned int
mcore_function_arg_boundary (machine_mode mode,
                             const_tree type ATTRIBUTE_UNUSED)
{
  /* Doubles must be aligned to an 8 byte boundary.  */
  return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
          ? BIGGEST_ALIGNMENT
          : PARM_BOUNDARY);
}

/* Returns the number of bytes of argument registers required to hold *part*
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

static int
mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
                         tree type, bool named)
{
  int reg = ROUND_REG (*get_cumulative_args (cum), mode);

  if (named == 0)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg * UNITS_PER_WORD;
}
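
/* A worked example (a sketch, with NPARM_REGS == 6): a three-word
   argument whose first word lands in argument register 4 overflows the
   register file.  Only registers 4 and 5 remain, so the code above
   returns 2 * UNITS_PER_WORD = 8 bytes passed in registers, with the
   remaining word going to the stack.  */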
\f
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return nonzero if SYMBOL is marked as being dllimport'd.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}
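
/* For example, a symbol `foo' is renamed to "@e.foo" by
   mcore_mark_dllexport and to "@i.__imp_foo" by mcore_mark_dllimport
   below; the predicates above simply test for those prefixes.  */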

/* Mark a DECL as being dllexport'd.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  if (mcore_dllexport_name_p (oldname))
    return; /* Already done.  */

  newname = XALLOCAVEC (char, strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
}

/* Mark a DECL as being dllimport'd.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  gcc_assert (!mcore_dllexport_name_p (oldname));
  if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("initialized variable %q+D is marked dllimport", decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = XALLOCAVEC (char, strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx_MEM (Pmode,
                        gen_rtx_SYMBOL_REF (Pmode,
                                            IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}

static int
mcore_dllexport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
}

/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drectve (PE) or .exports (ELF) sections.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
            || TREE_CODE (decl) == VAR_DECL)
           && DECL_RTL (decl) != NULL_RTX
           && GET_CODE (DECL_RTL (decl)) == MEM
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
         ??? We leave these alone for now.  */
    }
}

/* Undo the effects of the above.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  return str + (str[0] == '@' ? 3 : 0);
}

/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked - do not create a function prologue/epilogue.  */

/* Handle a "naked" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* ??? It looks like this is PE specific?  Oh well, this is what the
   old code did as well.  */

static void
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  int len;
  const char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  name = (* targetm.strip_name_encoding) (name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place them in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (decl_readonly_section (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = XALLOCAVEC (char, len + 1);

  sprintf (string, "%s%s", prefix, name);

  set_decl_section_name (decl, string);
}

int
mcore_naked_function_p (void)
{
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}

static bool
mcore_warn_func_return (tree decl)
{
  /* Naked functions are implemented entirely in assembly, including the
     return sequence, so suppress warnings about this.  */
  return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
}

#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char *name,
                         unsigned int flags ATTRIBUTE_UNUSED,
                         tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */

/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */

static void
mcore_external_libcall (rtx fun)
{
  fprintf (asm_out_file, "\t.import\t");
  assemble_name (asm_out_file, XSTR (fun, 0));
  fprintf (asm_out_file, "\n");
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  const HOST_WIDE_INT size = int_size_in_bytes (type);
  return (size == -1 || size > 2 * UNITS_PER_WORD);
}

/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
   Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   On the MCore, the trampoline looks like:
     lrw  r1, function
     lrw  r13, area
     jmp  r13
     or   r0, r0
     .literals  */

static void
mcore_asm_trampoline_template (FILE *f)
{
  fprintf (f, "\t.short 0x7102\n");
  fprintf (f, "\t.short 0x7d02\n");
  fprintf (f, "\t.short 0x00cd\n");
  fprintf (f, "\t.short 0x1e00\n");
  fprintf (f, "\t.long 0\n");
  fprintf (f, "\t.long 0\n");
}

/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, fnaddr);
}
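
/* The resulting 16 byte trampoline, for reference: the four .short
   words from the template form the code (two lrw's, a jmp r13 and a
   nop), and the two .long slots are the literals.  The init function
   above then stores the static chain at offset 8 and the nested
   function's address at offset 12.  */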

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the MCore, allow anything but a double.  */

static bool
mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return GET_CODE (x) != CONST_DOUBLE;
}

/* Helper function for `mcore_legitimate_address_p'.  */

static bool
mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
{
  if (strict_p)
    return REGNO_OK_FOR_BASE_P (REGNO (reg));
  else
    return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
}

static bool
mcore_base_register_rtx_p (const_rtx x, bool strict_p)
{
  return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
}

/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
   and for DI is 0..56 because we use two SI loads, etc.  */

static bool
mcore_legitimate_index_p (machine_mode mode, const_rtx op)
{
  if (CONST_INT_P (op))
    {
      if (GET_MODE_SIZE (mode) >= 4
          && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
          && ((unsigned HOST_WIDE_INT) INTVAL (op))
             <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
        return true;
      if (GET_MODE_SIZE (mode) == 2
          && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
          && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
        return true;
      if (GET_MODE_SIZE (mode) == 1
          && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
        return true;
    }
  return false;
}
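
/* Worked examples of the ranges above (a sketch): for SImode the legal
   offsets are 0, 4, ..., 60; for DImode 0, 4, ..., 56, so that the
   second SImode access at offset + 4 still reaches; for HImode 0, 2,
   ..., 30; and for QImode 0 through 15.  Anything unaligned or out of
   range is rejected and must go through a base register instead.  */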

/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.

   Allow  REG
          REG + disp  */

static bool
mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
                            addr_space_t as)
{
  gcc_assert (ADDR_SPACE_GENERIC_P (as));

  if (mcore_base_register_rtx_p (x, strict_p))
    return true;
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);
      if (mcore_base_register_rtx_p (xop0, strict_p)
          && mcore_legitimate_index_p (mode, xop1))
        return true;
      if (mcore_base_register_rtx_p (xop1, strict_p)
          && mcore_legitimate_index_p (mode, xop0))
        return true;
    }

  return false;
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  We may keep double values in
   even registers.  */

static bool
mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return (regno & 1) == 0;

  return regno < 18;
}
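
/* For example, with TARGET_8ALIGN a DImode value may start in r2 (using
   the r2/r3 pair) but not in r3, since a doubleword must begin in an
   even register; regnos of 18 and above are rejected for every mode.  */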

/* Implement TARGET_MODES_TIEABLE_P.  */

static bool
mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
}