]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/config/mcore/mcore.c
Turn HARD_REGNO_CALL_PART_CLOBBERED into a target hook
[thirdparty/gcc.git] / gcc / config / mcore / mcore.c
CommitLineData
8f90be4c 1/* Output routines for Motorola MCore processor
cbe34bb5 2 Copyright (C) 1993-2017 Free Software Foundation, Inc.
8f90be4c 3
08903e08 4 This file is part of GCC.
8f90be4c 5
08903e08
SB
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
2f83c7d6 8 by the Free Software Foundation; either version 3, or (at your
08903e08 9 option) any later version.
8f90be4c 10
08903e08
SB
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
8f90be4c 15
08903e08 16 You should have received a copy of the GNU General Public License
2f83c7d6
NC
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
8f90be4c 19
bc27e96c 20#include "config.h"
4bd048ef 21#include "system.h"
4977bab6 22#include "coretypes.h"
c7131fb2 23#include "backend.h"
e11c4407 24#include "target.h"
4816b8e4 25#include "rtl.h"
e11c4407 26#include "tree.h"
c7131fb2 27#include "df.h"
4d0cdd0c 28#include "memmodel.h"
e11c4407
AM
29#include "tm_p.h"
30#include "stringpool.h"
314e6352 31#include "attribs.h"
e11c4407
AM
32#include "emit-rtl.h"
33#include "diagnostic-core.h"
d8a2d370
DN
34#include "stor-layout.h"
35#include "varasm.h"
d8a2d370 36#include "calls.h"
8f90be4c 37#include "mcore.h"
8f90be4c 38#include "output.h"
36566b39 39#include "explow.h"
8f90be4c 40#include "expr.h"
60393bbc 41#include "cfgrtl.h"
9b2b7279 42#include "builtins.h"
0f8012fb 43#include "regs.h"
8f90be4c 44
994c5d85 45/* This file should be included last. */
d58627a0
RS
46#include "target-def.h"
47
8f90be4c
NC
48/* For dumping information about frame sizes. */
49char * mcore_current_function_name = 0;
50long mcore_current_compilation_timestamp = 0;
51
52/* Global variables for machine-dependent things. */
53
8f90be4c
NC
/* Provides the class number of the smallest class containing
   reg number.  Indexed by hard register number; the final entries
   cover the special registers (C_REGS is the condition register
   class; NO_REGS entries are not allocatable).  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};
64
f27cd94d
NC
/* Describes the layout of a function's stack frame and the sequence
   of stack-pointer adjustments used to allocate it.  */
struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;		/* NOTE(review): presumably alignment padding
				   for the outbound-args area -- confirm in
				   layout_mcore_frame.  */
  int pad_local;		/* Padding for the locals area (see above).  */
  int pad_reg;			/* Padding for the register-save area.  */
  /* Describe the steps we'll use to grow it.  */
#define	MAX_STACK_GROWS	4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];	/* Size of each stack adjustment step.  */
  int arg_offset;		/* Offset of the stdarg spill area.  */
  int reg_offset;		/* Offset of the register-save area.  */
  int reg_growth;		/* Index into growth[] for the reg saves.  */
  int local_growth;		/* Index into growth[] for the locals.  */
};
83
/* Classification used by the conditional-execution optimization
   (see is_cond_candidate / conditionalize_block): either an insn
   cannot be conditionalized (COND_NO) or it can be rewritten as
   the indicated kind of conditional instruction.  */
typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;
94
08903e08
SB
95static void output_stack_adjust (int, int);
96static int calc_live_regs (int *);
e0416079 97static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
ef4bddc2 98static const char * output_inline_const (machine_mode, rtx *);
08903e08 99static void layout_mcore_frame (struct mcore_frame *);
ef4bddc2 100static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
08903e08 101static cond_type is_cond_candidate (rtx);
6251fe93 102static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
b32d5189 103static rtx_insn *conditionalize_block (rtx_insn *);
08903e08
SB
104static void conditionalize_optimization (void);
105static void mcore_reorg (void);
ef4bddc2 106static rtx handle_structs_in_regs (machine_mode, const_tree, int);
08903e08
SB
107static void mcore_mark_dllexport (tree);
108static void mcore_mark_dllimport (tree);
109static int mcore_dllexport_p (tree);
110static int mcore_dllimport_p (tree);
08903e08 111static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
ede75ee8 112#ifdef OBJECT_FORMAT_ELF
08903e08 113static void mcore_asm_named_section (const char *,
c18a5b6c 114 unsigned int, tree);
ede75ee8 115#endif
349f851e 116static void mcore_print_operand (FILE *, rtx, int);
cc8ca59e 117static void mcore_print_operand_address (FILE *, machine_mode, rtx);
349f851e 118static bool mcore_print_operand_punct_valid_p (unsigned char code);
08903e08
SB
119static void mcore_unique_section (tree, int);
120static void mcore_encode_section_info (tree, rtx, int);
121static const char *mcore_strip_name_encoding (const char *);
d96be87b
JBG
122static int mcore_const_costs (rtx, RTX_CODE);
123static int mcore_and_cost (rtx);
124static int mcore_ior_cost (rtx);
e548c9df 125static bool mcore_rtx_costs (rtx, machine_mode, int, int,
68f932c4 126 int *, bool);
09a2b93a 127static void mcore_external_libcall (rtx);
586de218 128static bool mcore_return_in_memory (const_tree, const_tree);
d5cc9181 129static int mcore_arg_partial_bytes (cumulative_args_t,
ef4bddc2 130 machine_mode,
78a52f11 131 tree, bool);
d5cc9181 132static rtx mcore_function_arg (cumulative_args_t,
ef4bddc2 133 machine_mode,
4665ac17 134 const_tree, bool);
d5cc9181 135static void mcore_function_arg_advance (cumulative_args_t,
ef4bddc2 136 machine_mode,
4665ac17 137 const_tree, bool);
ef4bddc2 138static unsigned int mcore_function_arg_boundary (machine_mode,
c2ed6cf8 139 const_tree);
71e0af3c
RH
140static void mcore_asm_trampoline_template (FILE *);
141static void mcore_trampoline_init (rtx, tree, rtx);
d45eae79 142static bool mcore_warn_func_return (tree);
c5387660 143static void mcore_option_override (void);
ef4bddc2 144static bool mcore_legitimate_constant_p (machine_mode, rtx);
e7c6980e
AS
145static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
146 addr_space_t);
5a82ecd9
ILT
147\f
/* MCore specific attributes.  The table is terminated by the
   all-NULL sentinel entry.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "dllexport", 0, 0, true,  false, false, NULL, false },
  { "dllimport", 0, 0, true,  false, false, NULL, false },
  /* "naked" is validated by mcore_handle_naked_attribute.  */
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
    false },
  { NULL,        0, 0, false, false, false, NULL, false }
};
672a6f42
NB
160\f
161/* Initialize the GCC target structure. */
09a2b93a
KH
162#undef TARGET_ASM_EXTERNAL_LIBCALL
163#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
164
b2ca3702 165#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
08903e08
SB
166#undef TARGET_MERGE_DECL_ATTRIBUTES
167#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
672a6f42
NB
168#endif
169
301d03af 170#ifdef OBJECT_FORMAT_ELF
08903e08 171#undef TARGET_ASM_UNALIGNED_HI_OP
301d03af 172#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
08903e08 173#undef TARGET_ASM_UNALIGNED_SI_OP
301d03af
RS
174#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
175#endif
176
349f851e
NF
177#undef TARGET_PRINT_OPERAND
178#define TARGET_PRINT_OPERAND mcore_print_operand
179#undef TARGET_PRINT_OPERAND_ADDRESS
180#define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
181#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
182#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
183
08903e08
SB
184#undef TARGET_ATTRIBUTE_TABLE
185#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
186#undef TARGET_ASM_UNIQUE_SECTION
187#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
ab5c8549
JJ
188#undef TARGET_ASM_FUNCTION_RODATA_SECTION
189#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
08903e08
SB
190#undef TARGET_ENCODE_SECTION_INFO
191#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
192#undef TARGET_STRIP_NAME_ENCODING
193#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
194#undef TARGET_RTX_COSTS
195#define TARGET_RTX_COSTS mcore_rtx_costs
196#undef TARGET_ADDRESS_COST
b413068c 197#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
08903e08
SB
198#undef TARGET_MACHINE_DEPENDENT_REORG
199#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
18dbd950 200
cde0f3fd
PB
201#undef TARGET_PROMOTE_FUNCTION_MODE
202#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
09a2b93a 203#undef TARGET_PROMOTE_PROTOTYPES
586de218 204#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
09a2b93a 205
09a2b93a
KH
206#undef TARGET_RETURN_IN_MEMORY
207#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
fe984136
RH
208#undef TARGET_MUST_PASS_IN_STACK
209#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
8cd5a4e0
RH
210#undef TARGET_PASS_BY_REFERENCE
211#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
78a52f11
RH
212#undef TARGET_ARG_PARTIAL_BYTES
213#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
4665ac17
NF
214#undef TARGET_FUNCTION_ARG
215#define TARGET_FUNCTION_ARG mcore_function_arg
216#undef TARGET_FUNCTION_ARG_ADVANCE
217#define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
c2ed6cf8
NF
218#undef TARGET_FUNCTION_ARG_BOUNDARY
219#define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
09a2b93a
KH
220
221#undef TARGET_SETUP_INCOMING_VARARGS
222#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
223
71e0af3c
RH
224#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
225#define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
226#undef TARGET_TRAMPOLINE_INIT
227#define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
228
c5387660
JM
229#undef TARGET_OPTION_OVERRIDE
230#define TARGET_OPTION_OVERRIDE mcore_option_override
fd02e833 231
1a627b35
RS
232#undef TARGET_LEGITIMATE_CONSTANT_P
233#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
e7c6980e
AS
234#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
235#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
1a627b35 236
d81db636
SB
237#undef TARGET_LRA_P
238#define TARGET_LRA_P hook_bool_void_false
239
d45eae79
SL
240#undef TARGET_WARN_FUNC_RETURN
241#define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
242
f6897b10 243struct gcc_target targetm = TARGET_INITIALIZER;
f27cd94d 244\f
8f90be4c
NC
/* Emit RTL to adjust the stack pointer by SIZE bytes.  DIRECTION > 0
   grows the stack (adds to sp); DIRECTION < 0 shrinks it (subtracts).
   (The old comment claimed this returned the byte count; it is void.)  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  /* Touch the new stack bottom with a volatile store so the
	     access (a stack probe) is not optimized away.  */
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      /* Adjustments over 32 bytes will not fit in the instruction's
	 immediate field; stage the constant through register r1.  */
      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
290
4816b8e4
NC
291/* Work out the registers which need to be saved,
292 both as a mask and a count. */
293
8f90be4c 294static int
08903e08 295calc_live_regs (int * count)
8f90be4c
NC
296{
297 int reg;
298 int live_regs_mask = 0;
299
300 * count = 0;
301
302 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
303 {
6fb5fa3c 304 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
8f90be4c
NC
305 {
306 (*count)++;
307 live_regs_mask |= (1 << reg);
308 }
309 }
310
311 return live_regs_mask;
312}
313
/* Print the operand address in x to the stream.  Supported forms are
   a bare register "(rN)", a register-plus-constant "(rN,imm)" (the
   register may appear as either PLUS operand), and a symbolic
   constant.  Any other PLUS form aborts.  */

static void
mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO(base)], INTVAL (index));
	    break;

	  default:
	    /* Only reg+const addressing is valid here.  */
	    gcc_unreachable ();
	  }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
357
349f851e
NF
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P: return true for the
   punctuation codes that mcore_print_operand accepts on their own.  */
static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  switch (code)
    {
    case '.':
    case '#':
    case '*':
    case '^':
    case '!':
      return true;
    default:
      return false;
    }
}
364
8f90be4c
NC
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'N' print 32 for -1, else log2 of (value + 1) (bmaski-style width)
   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'O' print a constant without the #
   'M' print a constant as its negative
   'P' print log2 of a power of two
   'Q' print log2 of an inverse of a power of two
   'U' print register for ldm/stm instruction
   'x' print a constant in hexadecimal
   'X' print byte number for xtrbN instruction.

   NOTE(review): several cases print to asm_out_file while others use
   the STREAM parameter -- presumably they are always the same stream
   here, but this should be confirmed/unified.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      /* Mask to 32 bits in case HOST_WIDE_INT is wider.  */
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  /* Address of the word 4 bytes further along.  */
	  mcore_print_operand_address
	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      /* Register range rN-rN+3 for ldm/stm.  */
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      /* Byte number for xtrbN: byte 0 is the most significant.  */
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      /* No modifier: print the operand according to its rtx code.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
442
/* What does a constant cost ?  Returns 1 for immediates usable
   directly, 2 for those usable complemented (in an AND) or negated
   (in a PLUS), and 5 otherwise.  CODE is the rtx code of the
   expression in which the constant appears.  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
468
/* What does an and instruction cost - we do this b/c immediates may
   have been relaxed.   We want to ensure that cse will cse relaxed immeds
   out.  Otherwise we'll get bad code (multiple reloads of the same const).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  /* AND with a non-constant operand: cheap.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
496
4816b8e4
NC
/* What does an or cost - see and_cost().  The cost reflects how many
   instructions are needed to materialize the constant operand.  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  /* IOR with a non-constant operand: cheap.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
522
/* Implement TARGET_RTX_COSTS.  Store the cost of expression X in
   *TOTAL and return true if the cost is final; return false to let
   the generic code compute the cost.  */
static bool
mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
		 int opno ATTRIBUTE_UNUSED,
		 int * total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      /* Treated as very expensive operations.  */
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}
565
f90b7a5a
PB
/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  Emits the SET of the CC register and
   canonicalizes CODE/OP1 to the comparisons the hardware provides
   (cmpne, cmplt, cmphs), inverting and/or reversing where needed.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned > 0 is the same as != 0; everything else is converted
	     below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  break;

	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  /* Non-CONST_INT constants (symbols etc.) must live in a register.  */
  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* FALLTHRU */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* FALLTHRU */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* FALLTHRU */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* FALLTHRU */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* FALLTHRU */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
674
8f90be4c 675int
08903e08 676mcore_symbolic_address_p (rtx x)
8f90be4c
NC
677{
678 switch (GET_CODE (x))
679 {
680 case SYMBOL_REF:
681 case LABEL_REF:
682 return 1;
683 case CONST:
684 x = XEXP (x, 0);
685 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
686 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
687 && GET_CODE (XEXP (x, 1)) == CONST_INT);
688 default:
689 return 0;
690 }
691}
692
/* Functions to output assembly code for a function call.  */

/* Return the assembler template for the call whose address is
   operands[INDEX]: "jsr" for a register-indirect call, "jbsr" for a
   direct call.  When TARGET_CG_DATA is set, also record a call-graph
   edge from the current function (target "unknown" for indirect
   calls).  The returned buffer is static and overwritten per call.  */
char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
729
/* Can we load a constant with a single instruction ?
   True for 0..127 (movi), exact powers of two (bgeni), and
   powers of two minus one other than -1 (bmaski).  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
748
749/* Can we load a constant inline with up to 2 instructions ? */
4816b8e4 750
8f90be4c 751int
6e3a343d 752mcore_const_ok_for_inline (HOST_WIDE_INT value)
8f90be4c 753{
6e3a343d 754 HOST_WIDE_INT x, y;
8f90be4c
NC
755
756 return try_constant_tricks (value, & x, & y) > 0;
757}
758
759/* Are we loading the constant using a not ? */
4816b8e4 760
8f90be4c 761int
6e3a343d 762mcore_const_trick_uses_not (HOST_WIDE_INT value)
8f90be4c 763{
6e3a343d 764 HOST_WIDE_INT x, y;
8f90be4c
NC
765
766 return try_constant_tricks (value, & x, & y) == 2;
767}
768
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).  On success *X receives the constant
   to load first and, for the two-step tricks, *Y the operand of the
   follow-up instruction.

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  /* The multi-instruction tricks are only worthwhile with -mhardlit.  */
  if (! TARGET_HARDLIT)
    return 0;

  /* Trick 2: load complemented value, then not.  */
  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  /* Tricks 3/4: load value +/- i (i in 1..32), then subi/addi.  */
  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  /* Tricks 5/6/7: rsubi, or set/clear one bit afterwards.  */
  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;
	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  /* Tricks 8/9: load a value that rotates/shifts left into VALUE.  */
  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;	/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;	/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  /* Tricks 10/11: multiples of 3 (ixh) or 5 (ixw).  */
  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
903
8f90be4c
NC
/* Return nonzero if REG is dead at insn FIRST.  This is done by searching
   ahead for either the next use (i.e., reg is live), a death note, or a
   set of reg.  Don't just use dead_or_set_p() since reload does not always
   mark deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).
   We can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx_insn *first, rtx reg)
{
  rtx_insn *insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn))
	return 0;	/* We lose track, assume it is alive.  */

      else if (CALL_P (insn))
	{
	  /* Call's might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (NONJUMP_INSN_P (insn))
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
952
8f90be4c 953/* Count the number of ones in mask. */
4816b8e4 954
8f90be4c 955int
6e3a343d 956mcore_num_ones (HOST_WIDE_INT mask)
8f90be4c 957{
4816b8e4 958 /* A trick to count set bits recently posted on comp.compilers. */
8f90be4c
NC
959 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
960 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
961 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
962 mask = ((mask >> 8) + mask);
963
964 return (mask + (mask >> 16)) & 0xff;
965}
966
4816b8e4
NC
967/* Count the number of zeros in mask. */
968
8f90be4c 969int
6e3a343d 970mcore_num_zeros (HOST_WIDE_INT mask)
8f90be4c
NC
971{
972 return 32 - mcore_num_ones (mask);
973}
974
/* Determine byte being masked.  Returns the index (0-3) of the byte
   whose bits are clear in MASK, or -1 if MASK is not a mask with
   exactly one byte cleared.  */

int
mcore_byte_offset (unsigned int mask)
{
  switch (mask)
    {
    case 0x00ffffffUL:
      return 0;
    case 0xff00ffffUL:
      return 1;
    case 0xffff00ffUL:
      return 2;
    case 0xffffff00UL:
      return 3;
    default:
      return -1;
    }
}
991
/* Determine halfword being masked.  Returns 0 if MASK keeps only the
   low halfword, 1 if it keeps only the high halfword, else -1.  */

int
mcore_halfword_offset (unsigned int mask)
{
  switch (mask)
    {
    case 0x0000ffffUL:
      return 0;
    case 0xffff0000UL:
      return 1;
    default:
      return -1;
    }
}
1004
/* Output a series of bseti's corresponding to mask.  One "bseti"
   is emitted for every set bit in MASK, targeting DST.  Returns an
   empty template since everything was emitted via output_asm_insn.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
1028
/* Output a series of bclri's corresponding to mask.  One "bclri"
   is emitted for every CLEAR bit in MASK, targeting DST.  Returns an
   empty template since everything was emitted via output_asm_insn.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}
1053
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  CMP_T is the sense of the test; TEST, if
   non-NULL, is an assembler template for the comparison to emit first.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  /* NOTE(review): if neither constant is loadable, out_operands[1]/[2]
     are read uninitialized below -- presumably the movtK pattern
     conditions guarantee one of them is; confirm against mcore.md.  */
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment: the other constant is reached by
     incrementing or decrementing under the (possibly complemented)
     condition.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
1118
1119/* Outputs the peephole for moving a constant that gets not'ed followed
4816b8e4
NC
1120 by an and (i.e. combine the not and the and into andn). BRC */
1121
f27cd94d 1122const char *
08903e08 1123mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
8f90be4c 1124{
6e3a343d 1125 HOST_WIDE_INT x, y;
8f90be4c 1126 rtx out_operands[3];
f27cd94d 1127 const char * load_op;
8f90be4c 1128 char buf[256];
6e1f65b5 1129 int trick_no;
8f90be4c 1130
6e1f65b5
NS
1131 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1132 gcc_assert (trick_no == 2);
8f90be4c
NC
1133
1134 out_operands[0] = operands[0];
6e3a343d 1135 out_operands[1] = GEN_INT (x);
8f90be4c
NC
1136 out_operands[2] = operands[2];
1137
1138 if (x >= 0 && x <= 127)
1139 load_op = "movi\t%0,%1";
4816b8e4
NC
1140
1141 /* Try exact power of two. */
6e3a343d 1142 else if (CONST_OK_FOR_M (x))
8f90be4c 1143 load_op = "bgeni\t%0,%P1";
4816b8e4
NC
1144
1145 /* Try exact power of two - 1. */
6e3a343d 1146 else if (CONST_OK_FOR_N (x))
8f90be4c 1147 load_op = "bmaski\t%0,%N1";
4816b8e4 1148
6e3a343d
NC
1149 else
1150 {
1151 load_op = "BADMOVI-andn\t%0, %1";
1152 gcc_unreachable ();
1153 }
8f90be4c
NC
1154
1155 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1156 output_asm_insn (buf, out_operands);
1157
1158 return "";
1159}
1160
1161/* Output an inline constant. */
4816b8e4 1162
f27cd94d 1163static const char *
ef4bddc2 1164output_inline_const (machine_mode mode, rtx operands[])
8f90be4c 1165{
6e3a343d 1166 HOST_WIDE_INT x = 0, y = 0;
8f90be4c
NC
1167 int trick_no;
1168 rtx out_operands[3];
1169 char buf[256];
1170 char load_op[256];
f27cd94d 1171 const char *dst_fmt;
6e3a343d 1172 HOST_WIDE_INT value;
8f90be4c
NC
1173
1174 value = INTVAL (operands[1]);
8f90be4c 1175
6e1f65b5
NS
1176 trick_no = try_constant_tricks (value, &x, &y);
1177 /* lrw's are handled separately: Large inlinable constants never get
1178 turned into lrw's. Our caller uses try_constant_tricks to back
1179 off to an lrw rather than calling this routine. */
1180 gcc_assert (trick_no != 0);
1181
8f90be4c
NC
1182 if (trick_no == 1)
1183 x = value;
1184
4816b8e4 1185 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
8f90be4c
NC
1186 out_operands[0] = operands[0];
1187 out_operands[1] = GEN_INT (x);
1188
1189 if (trick_no > 2)
1190 out_operands[2] = GEN_INT (y);
1191
4816b8e4 1192 /* Select dst format based on mode. */
8f90be4c
NC
1193 if (mode == DImode && (! TARGET_LITTLE_END))
1194 dst_fmt = "%R0";
1195 else
1196 dst_fmt = "%0";
1197
1198 if (x >= 0 && x <= 127)
1199 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
4816b8e4 1200
8f90be4c 1201 /* Try exact power of two. */
6e3a343d 1202 else if (CONST_OK_FOR_M (x))
8f90be4c 1203 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
4816b8e4
NC
1204
1205 /* Try exact power of two - 1. */
6e3a343d 1206 else if (CONST_OK_FOR_N (x))
8f90be4c 1207 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
4816b8e4 1208
6e3a343d
NC
1209 else
1210 {
1211 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1212 gcc_unreachable ();
1213 }
8f90be4c
NC
1214
1215 switch (trick_no)
1216 {
1217 case 1:
1218 strcpy (buf, load_op);
1219 break;
1220 case 2: /* not */
6e3a343d 1221 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1222 break;
1223 case 3: /* add */
6e3a343d 1224 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1225 break;
1226 case 4: /* sub */
6e3a343d 1227 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1228 break;
1229 case 5: /* rsub */
4816b8e4 1230 /* Never happens unless -mrsubi, see try_constant_tricks(). */
6e3a343d 1231 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c 1232 break;
6e3a343d
NC
1233 case 6: /* bseti */
1234 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1235 break;
1236 case 7: /* bclr */
6e3a343d 1237 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1238 break;
1239 case 8: /* rotl */
6e3a343d 1240 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1241 break;
1242 case 9: /* lsl */
6e3a343d 1243 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
8f90be4c
NC
1244 break;
1245 case 10: /* ixh */
6e3a343d 1246 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
8f90be4c
NC
1247 break;
1248 case 11: /* ixw */
6e3a343d 1249 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
8f90be4c
NC
1250 break;
1251 default:
1252 return "";
1253 }
1254
1255 output_asm_insn (buf, out_operands);
1256
1257 return "";
1258}
1259
/* Output a move of a word or less value.

   Selects the asm template for a SImode/HImode/QImode move based on
   the operand codes: register-register, register-memory, or a
   constant load (single instruction, inline sequence, or literal
   pool).  The comment tags (r-c, r-r, r-m, ...) name the operand
   classes in mcore.md.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r*/
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case E_SImode:
		return "ldw\t%0,%1";
	      case E_HImode:
		return "ld.h\t%0,%1";
	      case E_QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  /* Try the single-instruction immediate forms first, then a
	     short inline sequence, then fall back to the literal pool.  */
	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case E_SImode:
	return "stw\t%1,%0";
      case E_HImode:
	return "st.h\t%1,%0";
      case E_QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}
1328
8f90be4c
NC
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.

   %R0/%R1 select the other register of a pair / second word of the
   operand (presumably handled by the target's print_operand; verify
   against mcore.md usage).  */

const char *
mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov	%R0,%R1\n\tmov	%0,%1";
	  else
	    return "mov	%0,%1\n\tmov	%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  /* Load the 32-bit constant into the low-order word of the
	     pair and sign-extend into the other word (bmaski ...,32
	     yields all-ones, movi ...,0 yields zero).  */
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%R0,32";
	      else
		return "movi	%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%0,32";
	      else
		return "movi	%0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}
1429
1430/* Predicates used by the templates. */
1431
8f90be4c 1432int
08903e08 1433mcore_arith_S_operand (rtx op)
8f90be4c
NC
1434{
1435 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1436 return 1;
1437
1438 return 0;
1439}
1440
4816b8e4
NC
/* Expand insert bit field.  BRC

   OPERANDS[0] is the SImode destination, OPERANDS[1] the field width,
   OPERANDS[2] the bit position, OPERANDS[3] the value to insert.
   Returns 1 when the insv was expanded here, 0 to let the
   machine-independent code handle it.  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_AND (SImode, operands[0],
					       GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (operands[0],
				  gen_rtx_IOR (SImode, operands[0],
					       GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (operands[0],
			      gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits.

     NOTE(review): this compares a bit count against GET_MODE_SIZE
     (bytes, i.e. 4 for SImode); GET_MODE_BITSIZE was probably
     intended.  As written the mask is applied in almost all cases,
     which is safe but conservative — confirm before changing.  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
						  GEN_INT (posn))));

  emit_insn (gen_rtx_SET (operands[0],
			  gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
8f90be4c
NC
1541\f
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

/* Map a chunk size in bytes (1, 2 or 4) to the machine mode used to
   move it; the unused slots (0 and 3) hold VOIDmode.  */
static const machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};
1561
/* Emit the ldw/stw sequence for a block move of SIZE bytes from
   SRC_MEM to DST_MEM with known alignment ALIGN.  Loads and stores
   are software-pipelined in two phases so each chunk's load is
   emitted one iteration before its store.  */

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  /* Force both addresses into registers so adjust_address can form
     reg+offset operands below.  */
  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  /* Largest chunk that both the remaining size and the
	     alignment permit: 4, 2 or 1 bytes.  */
	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      /* Store the chunk loaded on the previous iteration.  */
      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
1627
88042663
RH
1628bool
1629mcore_expand_block_move (rtx *operands)
8f90be4c 1630{
88042663
RH
1631 HOST_WIDE_INT align, bytes, max;
1632
1633 if (GET_CODE (operands[2]) != CONST_INT)
1634 return false;
1635
1636 bytes = INTVAL (operands[2]);
1637 align = INTVAL (operands[3]);
8f90be4c 1638
88042663
RH
1639 if (bytes <= 0)
1640 return false;
1641 if (align > 4)
1642 align = 4;
1643
1644 switch (align)
8f90be4c 1645 {
88042663
RH
1646 case 4:
1647 if (bytes & 1)
1648 max = 4*4;
1649 else if (bytes & 3)
1650 max = 8*4;
1651 else
1652 max = 16*4;
1653 break;
1654 case 2:
1655 max = 4*2;
1656 break;
1657 case 1:
1658 max = 4*1;
1659 break;
1660 default:
6e1f65b5 1661 gcc_unreachable ();
88042663
RH
1662 }
1663
1664 if (bytes <= max)
1665 {
1666 block_move_sequence (operands[0], operands[1], bytes, align);
1667 return true;
8f90be4c
NC
1668 }
1669
88042663 1670 return false;
8f90be4c
NC
1671}
1672\f
1673
/* Code to generate prologue and epilogue sequences.  */

/* Number of argument registers consumed by named parameters before
   the anonymous (varargs) arguments begin; set by
   mcore_setup_incoming_varargs, read by layout_mcore_frame.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */
8f90be4c 1684
/* Compute the frame layout for the current function into *INFP:
   sizes and padding for the argument-spill, saved-register, local and
   outbound areas, plus the staged stack growths (infp->growth[]) the
   prologue will perform.  Three strategies are tried in order, from
   most to least compact, each ending at `finish'.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
	{
	  step += outbounds;
	  infp->reg_offset += outbounds;
	  outbounds = 0;
	}

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
	step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
	{
	  all += outbounds;
	  outbounds = 0;
	}

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      gcc_assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
	{
	  step += outbounds;
	  outbounds = 0;
	}

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
    	use some of localsize so that regarg is aligned and then
    	save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?, plus a few consistency checks.  */
 finish:
  gcc_assert (infp->reg_offset >= 0);
  gcc_assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}
1896
1897/* Define the offset between two registers, one to be eliminated, and
1898 the other its replacement, at the start of a routine. */
4816b8e4 1899
8f90be4c 1900int
08903e08 1901mcore_initial_elimination_offset (int from, int to)
8f90be4c
NC
1902{
1903 int above_frame;
1904 int below_frame;
1905 struct mcore_frame fi;
1906
1907 layout_mcore_frame (& fi);
1908
1909 /* fp to ap */
1910 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1911 /* sp to fp */
1912 below_frame = fi.outbound_size + fi.pad_outbound;
1913
1914 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1915 return above_frame;
1916
1917 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1918 return above_frame + below_frame;
1919
1920 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1921 return below_frame;
1922
6e1f65b5 1923 gcc_unreachable ();
8f90be4c
NC
1924}
1925
4816b8e4
NC
/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
			      machine_mode mode, tree type,
			      int * ptr_pretend_size ATTRIBUTE_UNUSED,
			      int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);

  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.
     NOTE(review): this deliberately overwrites the value computed
     just above, making that computation dead until the underlying
     bug is found.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}
1953
/* Emit the function prologue: perform the staged stack growths
   computed by layout_mcore_frame, spill anonymous/varargs argument
   registers, save call-saved registers (via stm or individual stw's),
   and establish the frame pointer if needed.  */

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
	ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
	       "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
	       mcore_current_function_name,
	       fi.arg_size, fi.reg_size, fi.reg_mask,
	       fi.local_size, fi.outbound_size,
	       frame_pointer_needed);
    }

  /* A naked function manages its own prologue.  */
  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      /* Spill the anonymous argument registers, highest register at
         the highest stack offset.  */
      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
	{
	  emit_insn (gen_movsi
		     (gen_rtx_MEM (SImode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offset)),
		      gen_rtx_REG (SImode, rn)));
	}
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
	{
	  /* Use a single stm for a contiguous run of registers ending
	     at r15 when it reaches the save area base (offset 0).  */
	  if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	    {
	      int first_reg = 15;

	      while (fi.reg_mask & (1 << first_reg))
		first_reg--;
	      first_reg++;

	      emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
					     gen_rtx_REG (SImode, first_reg),
					     GEN_INT (16 - first_reg)));

	      i -= (15 - first_reg);
	      offs += (16 - first_reg) * 4;
	    }
	  else if (fi.reg_mask & (1 << i))
	    {
	      emit_insn (gen_movsi
			 (gen_rtx_MEM (SImode,
				       plus_constant (Pmode, stack_pointer_rtx,
						      offs)),
			  gen_rtx_REG (SImode, i)));
	      offs += 4;
	    }
	}
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
}
2085
/* Expand the epilogue for the current function: undo the prologue by
   restoring the stack pointer (directly from the frame pointer when one
   was allocated), reloading the saved call-saved registers, and then
   releasing whatever stack space remains.  The fi.growth[] records are
   walked in the reverse of the order the prologue applied them.  */

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  /* Index of the last stack adjustment made by the prologue; we walk
     the growth records backwards from here.  */
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  /* Naked functions get no epilogue at all.  */
  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      /* The adjustments down to local_growth are subsumed by the frame
	 pointer restore, so skip past them.  */
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: this loop could accumulate the adjustments and release
	 the space with a single add.  */
      while (growth >= fi.local_growth)
	{
	  if (fi.growth[growth] != 0)
	    output_stack_adjust (1, fi.growth[growth]);
	  growth--;
	}
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      /* If the saved registers include r14 and r15 (mask bits 14 and 15)
	 and sit at offset 0, reload the whole contiguous run with a
	 single load-multiple, mirroring the prologue's store-multiple.  */
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	{
	  int first_reg;

	  /* Find the starting register.  */
	  first_reg = 15;

	  while (fi.reg_mask & (1 << first_reg))
	    first_reg--;

	  first_reg++;

	  emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
					gen_rtx_MEM (SImode, stack_pointer_rtx),
					GEN_INT (16 - first_reg)));

	  /* Skip the registers covered by the multiple load.  */
	  i -= (15 - first_reg);
	  offs += (16 - first_reg) * 4;
	}
      else if (fi.reg_mask & (1 << i))
	{
	  /* Reload a single saved register from its stack slot.  */
	  emit_insn (gen_movsi
		     (gen_rtx_REG (SImode, i),
		      gen_rtx_MEM (SImode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offs))));
	  offs += 4;
	}
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}
2163\f
2164/* This code is borrowed from the SH port. */
2165
2166/* The MCORE cannot load a large constant into a register, constants have to
2167 come from a pc relative load. The reference of a pc relative load
0fa2e4df 2168 instruction must be less than 1k in front of the instruction. This
8f90be4c
NC
2169 means that we often have to dump a constant inside a function, and
2170 generate code to branch around it.
2171
2172 It is important to minimize this, since the branches will slow things
2173 down and make things bigger.
2174
2175 Worst case code looks like:
2176
2177 lrw L1,r0
2178 br L2
2179 align
2180 L1: .long value
2181 L2:
2182 ..
2183
2184 lrw L3,r0
2185 br L4
2186 align
2187 L3: .long value
2188 L4:
2189 ..
2190
2191 We fix this by performing a scan before scheduling, which notices which
2192 instructions need to have their operands fetched from the constant table
2193 and builds the table.
2194
2195 The algorithm is:
2196
2197 scan, find an instruction which needs a pcrel move. Look forward, find the
2198 last barrier which is within MAX_COUNT bytes of the requirement.
2199 If there isn't one, make one. Process all the instructions between
2200 the find and the barrier.
2201
2202 In the above example, we can tell that L3 is within 1k of L1, so
2203 the first move can be shrunk from the 2 insn+constant sequence into
2204 just 1 insn, and the constant moved to L3 to make:
2205
2206 lrw L1,r0
2207 ..
2208 lrw L3,r0
2209 bra L4
2210 align
2211 L3:.long value
2212 L4:.long value
2213
2214 Then the second move becomes the target for the shortening process. */
2215
/* One entry in the pending constant pool: the constant's value and the
   CODE_LABEL marking its slot in the emitted table.  */
typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)

/* Accumulated pool entries and their count; flushed and reset to zero
   by mcore_output_jump_label_table.  */
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
2231
2232/* Dump out any constants accumulated in the final pass. These
2233 will only be labels. */
4816b8e4 2234
f27cd94d 2235const char *
08903e08 2236mcore_output_jump_label_table (void)
8f90be4c
NC
2237{
2238 int i;
2239
2240 if (pool_size)
2241 {
2242 fprintf (asm_out_file, "\t.align 2\n");
2243
2244 for (i = 0; i < pool_size; i++)
2245 {
2246 pool_node * p = pool_vector + i;
2247
4977bab6 2248 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
8f90be4c
NC
2249
2250 output_asm_insn (".long %0", &p->value);
2251 }
2252
2253 pool_size = 0;
2254 }
2255
2256 return "";
2257}
2258
8f90be4c 2259/* Check whether insn is a candidate for a conditional. */
4816b8e4 2260
/* Check whether INSN is a candidate for conditionalization, i.e. it can
   be rewritten as one of the MCore conditional instructions.  Returns a
   cond_type classifying the candidate, or COND_NO if it cannot be
   conditionalized (see emit_new_cond_insn for the conditional forms
   each class maps to).  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
	return COND_NO;

      dst = XEXP (pat, 0);

      /* Destination must be an SImode register (possibly a subreg).  */
      if ((GET_CODE (dst) != REG &&
	   GET_CODE (dst) != SUBREG) ||
	  GET_MODE (dst) != SImode)
	return COND_NO;

      src = XEXP (pat, 1);

      /* Reg-to-reg SImode copy.  */
      if ((GET_CODE (src) == REG ||
	   (GET_CODE (src) == SUBREG &&
	    GET_CODE (SUBREG_REG (src)) == REG)) &&
	  GET_MODE (src) == SImode)
	return COND_MOV_INSN;
      /* Loading the constant zero.  */
      else if (GET_CODE (src) == CONST_INT &&
	       INTVAL (src) == 0)
	return COND_CLR_INSN;
      /* reg + 1 (increment).  */
      else if (GET_CODE (src) == PLUS &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode &&
	       GET_CODE (XEXP (src, 1)) == CONST_INT &&
	       INTVAL (XEXP (src, 1)) == 1)
	return COND_INC_INSN;
      /* reg - 1 or reg + (-1) (decrement).  */
      else if (((GET_CODE (src) == MINUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL( XEXP (src, 1)) == 1) ||
		(GET_CODE (src) == PLUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL (XEXP (src, 1)) == -1)) &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode)
	return COND_DEC_INSN;

      /* Some insns that we don't bother with:
	 (set (rx:DI) (ry:DI))
	 (set (rx:DI) (const_int 0))
      */

    }
  /* An unconditional jump straight to a label can be replaced by a
     conditional branch.  */
  else if (JUMP_P (insn)
	   && GET_CODE (PATTERN (insn)) == SET
	   && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
2326
2327/* Emit a conditional version of insn and replace the old insn with the
2328 new one. Return the new insn if emitted. */
4816b8e4 2329
/* Emit a conditional version of INSN and replace the old insn with the
   new one.  COND selects which sense of the condition to use: nonzero
   emits the "true" form of each conditional instruction, zero the
   "false" form.  Return the new insn if emitted, NULL otherwise.  */

static rtx_insn *
emit_new_cond_insn (rtx_insn *insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  /* For a SET, pull out both operands; for a jump the "destination" is
     the target label and there is no source.  */
  if (NONJUMP_INSN_P (insn))
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  /* Build the conditional replacement chosen by the candidate class.  */
  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
	c_insn = gen_movt0 (dst, src, dst);
      else
	c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
	c_insn = gen_incscc (dst, dst);
      else
	c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
	c_insn = gen_decscc (dst, dst);
      else
	c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
	c_insn = gen_branch_true (dst);
      else
	c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
	 point, but go ahead and save the notes.  This will help is_dead()
	 when applying peepholes (links don't matter since they are not
	 used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
         before the old one and to update the use count for the target label.
         This way, the barrier following the old (uncond) jump will get
	 deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return as_a <rtx_insn *> (c_insn);
}
2417
2418/* Attempt to change a basic block into a series of conditional insns. This
2419 works by taking the branch at the end of the 1st block and scanning for the
2420 end of the 2nd block. If all instructions in the 2nd block have cond.
2421 versions and the label at the start of block 3 is the same as the target
2422 from the branch at block 1, then conditionalize all insn in block 2 using
2423 the inverse condition of the branch at block 1. (Note I'm bending the
2424 definition of basic block here.)
2425
2426 e.g., change:
2427
2428 bt L2 <-- end of block 1 (delete)
2429 mov r7,r8
2430 addu r7,1
2431 br L3 <-- end of block 2
2432
2433 L2: ... <-- start of block 3 (NUSES==1)
2434 L3: ...
2435
2436 to:
2437
2438 movf r7,r8
2439 incf r7
2440 bf L3
2441
2442 L3: ...
2443
2444 we can delete the L2 label if NUSES==1 and re-apply the optimization
2445 starting at the last instruction of block 2. This may allow an entire
4816b8e4 2446 if-then-else statement to be conditionalized. BRC */
b32d5189
DM
/* Attempt the block conditionalization described in the comment above,
   starting at FIRST.  Returns the next insn at which to resume scanning
   (either because the transformation failed here, or, on success, a
   point from which it may be re-applied).  */
static rtx_insn *
conditionalize_block (rtx_insn *first)
{
  rtx_insn *insn;
  rtx br_pat;
  rtx_insn *end_blk_1_br = 0;
  rtx_insn *end_blk_2_insn = 0;
  rtx_insn *start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;


  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (! JUMP_P (first)
      || GET_CODE (PATTERN (first)) != SET
      || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
	 and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
	break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
	 insn is not conditionalizable or makes this optimization fail,
	 just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
	return NEXT_INSN (insn);

      /* Remember the last real insn before the label (i.e. end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
	{
	  blk_size ++;
	  end_blk_2_insn = insn;
	}
    }

  /* Ran off the end of the insn stream without finding the label.  */
  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     the best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.   Also, all
     instructions in the block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx_insn *newinsn;

      if (insn->deleted ())
	continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
	{
	  if (end_blk_2_insn == insn)
	    end_blk_2_insn = newinsn;

	  insn = newinsn;
	}
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? we probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}
2578
2579/* Apply the conditionalization of blocks optimization. This is the
2580 outer loop that traverses through the insns scanning for a branch
2581 that signifies an opportunity to apply the optimization. Note that
2582 this optimization is applied late. If we could apply it earlier,
2583 say before cse 2, it may expose more optimization opportunities.
2584 but, the pay back probably isn't really worth the effort (we'd have
2585 to update all reg/flow/notes/links/etc to make it work - and stick it
4816b8e4
NC
2586 in before cse 2). */
2587
8f90be4c 2588static void
08903e08 2589conditionalize_optimization (void)
8f90be4c 2590{
b32d5189 2591 rtx_insn *insn;
8f90be4c 2592
18dbd950 2593 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
8f90be4c
NC
2594 continue;
2595}
2596
18dbd950 2597/* This is to handle loads from the constant pool. */
4816b8e4 2598
18dbd950 2599static void
08903e08 2600mcore_reorg (void)
8f90be4c
NC
2601{
2602 /* Reset this variable. */
2603 current_function_anonymous_args = 0;
2604
8f90be4c
NC
2605 if (optimize == 0)
2606 return;
2607
2608 /* Conditionalize blocks where we can. */
18dbd950 2609 conditionalize_optimization ();
8f90be4c
NC
2610
2611 /* Literal pool generation is now pushed off until the assembler. */
2612}
2613
2614\f
f0f4da32 2615/* Return true if X is something that can be moved directly into r15. */
8f90be4c 2616
f0f4da32 2617bool
08903e08 2618mcore_r15_operand_p (rtx x)
f0f4da32
RS
2619{
2620 switch (GET_CODE (x))
2621 {
2622 case CONST_INT:
2623 return mcore_const_ok_for_inline (INTVAL (x));
8f90be4c 2624
f0f4da32
RS
2625 case REG:
2626 case SUBREG:
2627 case MEM:
2628 return 1;
2629
2630 default:
2631 return 0;
2632 }
2633}
2634
0a2aaacc 2635/* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
f0f4da32 2636 directly move X into it, use r1-r14 as a temporary. */
08903e08 2637
f0f4da32 2638enum reg_class
0a2aaacc 2639mcore_secondary_reload_class (enum reg_class rclass,
ef4bddc2 2640 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
f0f4da32 2641{
0a2aaacc 2642 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
f0f4da32
RS
2643 && !mcore_r15_operand_p (x))
2644 return LRW_REGS;
2645 return NO_REGS;
2646}
8f90be4c 2647
f0f4da32 2648/* Return the reg_class to use when reloading the rtx X into the class
0a2aaacc 2649 RCLASS. If X is too complex to move directly into r15, prefer to
f0f4da32 2650 use LRW_REGS instead. */
08903e08 2651
8f90be4c 2652enum reg_class
0a2aaacc 2653mcore_reload_class (rtx x, enum reg_class rclass)
8f90be4c 2654{
0a2aaacc 2655 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
f0f4da32 2656 return LRW_REGS;
8f90be4c 2657
0a2aaacc 2658 return rclass;
8f90be4c
NC
2659}
2660
2661/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2662 register. Note that the current version doesn't worry about whether
2663 they are the same mode or note (e.g., a QImode in r2 matches an HImode
2664 in r2 matches an SImode in r2. Might think in the future about whether
2665 we want to be able to say something about modes. */
08903e08 2666
8f90be4c 2667int
08903e08 2668mcore_is_same_reg (rtx x, rtx y)
8f90be4c 2669{
14bc6742 2670 /* Strip any and all of the subreg wrappers. */
8f90be4c
NC
2671 while (GET_CODE (x) == SUBREG)
2672 x = SUBREG_REG (x);
2673
2674 while (GET_CODE (y) == SUBREG)
2675 y = SUBREG_REG (y);
2676
2677 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2678 return 1;
2679
2680 return 0;
2681}
2682
c5387660
JM
/* Option-override hook: adjust the target flags after the command
   line has been processed.  Little-endian code is only implemented
   by the m340, so requesting it implies -m340.  */
static void
mcore_option_override (void)
{
  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}
fac0f722 2690
8f90be4c 2691\f
8f90be4c
NC
2692/* Compute the number of word sized registers needed to
2693 hold a function argument of mode MODE and type TYPE. */
08903e08 2694
8f90be4c 2695int
ef4bddc2 2696mcore_num_arg_regs (machine_mode mode, const_tree type)
8f90be4c
NC
2697{
2698 int size;
2699
fe984136 2700 if (targetm.calls.must_pass_in_stack (mode, type))
8f90be4c
NC
2701 return 0;
2702
2703 if (type && mode == BLKmode)
2704 size = int_size_in_bytes (type);
2705 else
2706 size = GET_MODE_SIZE (mode);
2707
2708 return ROUND_ADVANCE (size);
2709}
2710
/* Return the RTL describing how a value of mode MODE and type TYPE is
   passed or returned starting at argument register REG.  Usually this
   is a plain REG, but odd-sized aggregates need a PARALLEL so the
   tail bytes are packed correctly (see below).  */
static rtx
handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of bytes is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      /* One EXPR_LIST per word, each pairing a consecutive argument
	 register with its byte offset into the aggregate.  */
      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
	{
	  arg_regs [nregs] =
	    gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
			       GEN_INT (nregs * UNITS_PER_WORD));
	  nregs ++;
	}

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      gcc_assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
			  arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
2754
/* Return the RTL for the location in which a value of type VALTYPE is
   returned from FUNC: FIRST_RET_REG after mode promotion, or a
   PARALLEL for odd-sized aggregates (see handle_structs_in_regs).  */
rtx
mcore_function_value (const_tree valtype, const_tree func)
{
  machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  /* Since we promote return types, we must promote the mode here too.  */
  mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}
2768
2769/* Define where to put the arguments to a function.
2770 Value is zero to push the argument on the stack,
2771 or a hard register in which to store the argument.
2772
2773 MODE is the argument's machine mode.
2774 TYPE is the data type of the argument (as a tree).
2775 This is null for libcalls where that information may
2776 not be available.
2777 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2778 the preceding args and about the function being called.
2779 NAMED is nonzero if this argument is a named parameter
2780 (otherwise it is an extra parameter matching an ellipsis).
2781
2782 On MCore the first args are normally in registers
2783 and the rest are pushed. Any arg that starts within the first
2784 NPARM_REGS words is at least partially passed in a register unless
2785 its data type forbids. */
08903e08 2786
/* Function-arg hook (see the comment above for the full contract):
   return the register RTX for this argument, or 0 to pass it on the
   stack.  Unnamed (varargs) arguments and VOIDmode always go on the
   stack.  */
static rtx
mcore_function_arg (cumulative_args_t cum, machine_mode mode,
		    const_tree type, bool named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* CUM counts argument registers already consumed; round up for this
     argument's alignment.  */
  arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}
2806
4665ac17 2807static void
ef4bddc2 2808mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
4665ac17
NF
2809 const_tree type, bool named ATTRIBUTE_UNUSED)
2810{
d5cc9181
JR
2811 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2812
4665ac17
NF
2813 *cum = (ROUND_REG (*cum, mode)
2814 + (int)named * mcore_num_arg_regs (mode, type));
2815}
2816
c2ed6cf8 2817static unsigned int
ef4bddc2 2818mcore_function_arg_boundary (machine_mode mode,
c2ed6cf8
NF
2819 const_tree type ATTRIBUTE_UNUSED)
2820{
2821 /* Doubles must be aligned to an 8 byte boundary. */
2822 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2823 ? BIGGEST_ALIGNMENT
2824 : PARM_BOUNDARY);
2825}
2826
78a52f11
RH
2827/* Returns the number of bytes of argument registers required to hold *part*
2828 of a parameter of machine mode MODE and type TYPE (which may be NULL if
dab66575 2829 the type is not known). If the argument fits entirely in the argument
8f90be4c
NC
2830 registers, or entirely on the stack, then 0 is returned. CUM is the
2831 number of argument registers already used by earlier parameters to
2832 the function. */
08903e08 2833
/* Returns the number of bytes of argument registers required to hold *part*
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

static int
mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
			 tree type, bool named)
{
  int reg = ROUND_REG (*get_cumulative_args (cum), mode);

  /* Unnamed (varargs) arguments are never split.  */
  if (named == 0)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg * UNITS_PER_WORD;
}
2869\f
a0ab749a 2870/* Return nonzero if SYMBOL is marked as being dllexport'd. */
08903e08 2871
/* Return nonzero if SYMBOL is marked as being dllexport'd, i.e. it
   carries the "@e." prefix added by mcore_mark_dllexport.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return strncmp (symbol, "@e.", 3) == 0;
}
2877
a0ab749a 2878/* Return nonzero if SYMBOL is marked as being dllimport'd. */
08903e08 2879
/* Return nonzero if SYMBOL is marked as being dllimport'd, i.e. it
   carries the "@i." prefix added by mcore_mark_dllimport.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return strncmp (symbol, "@i.", 3) == 0;
}
2885
2886/* Mark a DECL as being dllexport'd. */
08903e08 2887
/* Mark a DECL as being dllexport'd by rewriting its SYMBOL_REF name
   to carry the "@e." prefix recognized by mcore_dllexport_name_p.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  /* The name may be wrapped in a MEM; unwrap to the SYMBOL_REF.  */
  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  if (mcore_dllexport_name_p (oldname))
    return;  /* Already done.  */

  newname = XALLOCAVEC (char, strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
}
2919
2920/* Mark a DECL as being dllimport'd. */
08903e08 2921
/* Mark a DECL as being dllimport'd: rewrite its RTL name to
   "@i.__imp_<name>" wrapped in a MEM, so references go through the
   import pointer.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  /* The name may already be wrapped in a MEM; unwrap to the SYMBOL_REF.  */
  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  /* A symbol must not be both exported and imported.  */
  gcc_assert (!mcore_dllexport_name_p (oldname));
  if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("initialized variable %q+D is marked dllimport", decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = XALLOCAVEC (char, strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  /* References to the import symbol are indirected through a MEM.  */
  newrtl = gen_rtx_MEM (Pmode,
			gen_rtx_SYMBOL_REF (Pmode,
					    IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}
2979
2980static int
08903e08 2981mcore_dllexport_p (tree decl)
8f90be4c
NC
2982{
2983 if ( TREE_CODE (decl) != VAR_DECL
2984 && TREE_CODE (decl) != FUNCTION_DECL)
2985 return 0;
2986
91d231cb 2987 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
2988}
2989
2990static int
08903e08 2991mcore_dllimport_p (tree decl)
8f90be4c
NC
2992{
2993 if ( TREE_CODE (decl) != VAR_DECL
2994 && TREE_CODE (decl) != FUNCTION_DECL)
2995 return 0;
2996
91d231cb 2997 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
8f90be4c
NC
2998}
2999
fb49053f 3000/* We must mark dll symbols specially. Definitions of dllexport'd objects
14bc6742 3001 install some info in the .drective (PE) or .exports (ELF) sections. */
fb49053f
RH
3002
static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.
     Strip the MEM wrapper and the "@i.__imp_" prefix (9 chars) to
     recover the plain symbol.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
	    || TREE_CODE (decl) == VAR_DECL)
	   && DECL_RTL (decl) != NULL_RTX
	   && GET_CODE (DECL_RTL (decl)) == MEM
	   && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
	   && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
	   && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
	 ??? We leave these alone for now.  */
    }
}
3034
772c5265
RH
/* Undo the effects of the above: strip the three character "@i."
   prefix, if present, returning the bare symbol name.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  if (*str == '@')
    return str + 3;

  return str;
}
3042
8f90be4c
NC
3043/* MCore specific attribute support.
3044 dllexport - for exporting a function/variable that will live in a dll
3045 dllimport - for importing a function/variable from a dll
3046 naked - do not create a function prologue/epilogue. */
8f90be4c 3047
91d231cb
JM
3048/* Handle a "naked" attribute; arguments as in
3049 struct attribute_spec.handler. */
08903e08 3050
91d231cb 3051static tree
08903e08
SB
3052mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3053 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
91d231cb 3054{
d45eae79 3055 if (TREE_CODE (*node) != FUNCTION_DECL)
91d231cb 3056 {
29d08eba
JM
3057 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3058 name);
91d231cb 3059 *no_add_attrs = true;
8f90be4c
NC
3060 }
3061
91d231cb 3062 return NULL_TREE;
8f90be4c
NC
3063}
3064
ae46c4e0
RH
3065/* ??? It looks like this is PE specific? Oh well, this is what the
3066 old code did as well. */
8f90be4c 3067
ae46c4e0 3068static void
08903e08 3069mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
8f90be4c
NC
3070{
3071 int len;
0139adca 3072 const char * name;
8f90be4c 3073 char * string;
f27cd94d 3074 const char * prefix;
8f90be4c
NC
3075
3076 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3077
3078 /* Strip off any encoding in name. */
772c5265 3079 name = (* targetm.strip_name_encoding) (name);
8f90be4c
NC
3080
3081 /* The object is put in, for example, section .text$foo.
3082 The linker will then ultimately place them in .text
3083 (everything from the $ on is stripped). */
3084 if (TREE_CODE (decl) == FUNCTION_DECL)
3085 prefix = ".text$";
f710504c 3086 /* For compatibility with EPOC, we ignore the fact that the
8f90be4c 3087 section might have relocs against it. */
4e4d733e 3088 else if (decl_readonly_section (decl, 0))
8f90be4c
NC
3089 prefix = ".rdata$";
3090 else
3091 prefix = ".data$";
3092
3093 len = strlen (name) + strlen (prefix);
5ead67f6 3094 string = XALLOCAVEC (char, len + 1);
8f90be4c
NC
3095
3096 sprintf (string, "%s%s", prefix, name);
3097
f961457f 3098 set_decl_section_name (decl, string);
8f90be4c
NC
3099}
3100
3101int
08903e08 3102mcore_naked_function_p (void)
8f90be4c 3103{
91d231cb 3104 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
8f90be4c 3105}
7c262518 3106
d45eae79
SL
3107static bool
3108mcore_warn_func_return (tree decl)
3109{
3110 /* Naked functions are implemented entirely in assembly, including the
3111 return sequence, so suppress warnings about this. */
3112 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3113}
3114
#ifdef OBJECT_FORMAT_ELF
/* Worker for TARGET_ASM_NAMED_SECTION.  Emit a bare .section directive
   for NAME; FLAGS and DECL are ignored, so no section attributes
   (writable, executable, ...) are emitted.  */

static void
mcore_asm_named_section (const char *name,
			 unsigned int flags ATTRIBUTE_UNUSED,
			 tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */
09a2b93a 3124
dc7efe6e
KH
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.
   Emit a .import directive naming the library routine FUN refers to,
   so the assembler/linker knows the symbol is external.  */

static void
mcore_external_libcall (rtx fun)
{
  fprintf (asm_out_file, "\t.import\t");
  assemble_name (asm_out_file, XSTR (fun, 0));
  fprintf (asm_out_file, "\n");
}
3134
dc7efe6e
KH
3135/* Worker function for TARGET_RETURN_IN_MEMORY. */
3136
09a2b93a 3137static bool
586de218 3138mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
09a2b93a 3139{
586de218 3140 const HOST_WIDE_INT size = int_size_in_bytes (type);
78bc94a2 3141 return (size == -1 || size > 2 * UNITS_PER_WORD);
09a2b93a 3142}
71e0af3c
RH
3143
/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
   Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   On the MCore, the trampoline looks like:
   	lrw	r1,  function
     	lrw	r13,  area
   	jmp	r13
   	or	r0, r0
    .literals  */

static void
mcore_asm_trampoline_template (FILE *f)
{
  /* Four 16-bit opcodes (8 bytes) followed by two 32-bit literal
     slots at offsets 8 and 12, which mcore_trampoline_init fills in
     with the static chain and the target function's address.  */
  fprintf (f, "\t.short	0x7102\n");	/* lrw r1, <literal> */
  fprintf (f, "\t.short	0x7d02\n");	/* lrw r13, <literal> */
  fprintf (f, "\t.short	0x00cd\n");	/* jmp r13 */
  fprintf (f, "\t.short	0x1e00\n");	/* or r0, r0 (pad) */
  fprintf (f, "\t.long	0\n");		/* literal slot at offset 8 */
  fprintf (f, "\t.long	0\n");		/* literal slot at offset 12 */
}
3165
3166/* Worker function for TARGET_TRAMPOLINE_INIT. */
3167
3168static void
3169mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3170{
3171 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3172 rtx mem;
3173
3174 emit_block_move (m_tramp, assemble_trampoline_template (),
3175 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3176
3177 mem = adjust_address (m_tramp, SImode, 8);
3178 emit_move_insn (mem, chain_value);
3179 mem = adjust_address (m_tramp, SImode, 12);
3180 emit_move_insn (mem, fnaddr);
3181}
1a627b35
RS
3182
3183/* Implement TARGET_LEGITIMATE_CONSTANT_P
3184
3185 On the MCore, allow anything but a double. */
3186
3187static bool
ef4bddc2 3188mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1a627b35
RS
3189{
3190 return GET_CODE (x) != CONST_DOUBLE;
3191}
e7c6980e
AS
3192
3193/* Helper function for `mcore_legitimate_address_p'. */
3194
3195static bool
3196mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3197{
3198 if (strict_p)
3199 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3200 else
3201 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3202}
3203
3204static bool
3205mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3206{
3207 return REG_P(x) && mcore_reg_ok_for_base_p (x, strict_p);
3208}
3209
3210/* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3211 and for DI is 0..56 because we use two SI loads, etc. */
3212
3213static bool
3214mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3215{
3216 if (CONST_INT_P (op))
3217 {
3218 if (GET_MODE_SIZE (mode) >= 4
3219 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3220 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3221 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3222 return true;
3223 if (GET_MODE_SIZE (mode) == 2
3224 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3225 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3226 return true;
3227 if (GET_MODE_SIZE (mode) == 1
3228 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3229 return true;
3230 }
3231 return false;
3232}
3233
3234
3235/* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3236
3237 Allow REG
3238 REG + disp */
3239
3240static bool
3241mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3242 addr_space_t as)
3243{
3244 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3245
3246 if (mcore_base_register_rtx_p (x, strict_p))
3247 return true;
3248 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3249 {
3250 rtx xop0 = XEXP (x, 0);
3251 rtx xop1 = XEXP (x, 1);
3252 if (mcore_base_register_rtx_p (xop0, strict_p)
3253 && mcore_legitimate_index_p (mode, xop1))
3254 return true;
3255 if (mcore_base_register_rtx_p (xop1, strict_p)
3256 && mcore_legitimate_index_p (mode, xop0))
3257 return true;
3258 }
3259
3260 return false;
3261}
3262