1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
8 by the Free Software Foundation; either version 3, or (at your
9 option) any later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #define IN_TARGET_CODE 1
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "attribs.h"
34 #include "emit-rtl.h"
35 #include "diagnostic-core.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "calls.h"
39 #include "mcore.h"
40 #include "output.h"
41 #include "explow.h"
42 #include "expr.h"
43 #include "cfgrtl.h"
44 #include "builtins.h"
45 #include "regs.h"
46
47 /* This file should be included last. */
48 #include "target-def.h"
49
50 /* For dumping information about frame sizes. */
51 char * mcore_current_function_name = 0;
52 long mcore_current_compilation_timestamp = 0;
53
54 /* Global variables for machine-dependent things. */
55
56 /* Provides the class number of the smallest class containing
57 reg number. */
58 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
59 {
60 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
61 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
62 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
63 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
64 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
65 };
66
67 struct mcore_frame
68 {
69 int arg_size; /* Stdarg spills (bytes). */
70 int reg_size; /* Non-volatile reg saves (bytes). */
71 int reg_mask; /* Non-volatile reg saves. */
72 int local_size; /* Locals. */
73 int outbound_size; /* Arg overflow on calls out. */
74 int pad_outbound;
75 int pad_local;
76 int pad_reg;
77 /* Describe the steps we'll use to grow it. */
78 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
79 int growth[MAX_STACK_GROWS];
80 int arg_offset;
81 int reg_offset;
82 int reg_growth;
83 int local_growth;
84 };
85
86 typedef enum
87 {
88 COND_NO,
89 COND_MOV_INSN,
90 COND_CLR_INSN,
91 COND_INC_INSN,
92 COND_DEC_INSN,
93 COND_BRANCH_INSN
94 }
95 cond_type;
96
97 static void output_stack_adjust (int, int);
98 static int calc_live_regs (int *);
99 static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
100 static const char * output_inline_const (machine_mode, rtx *);
101 static void layout_mcore_frame (struct mcore_frame *);
102 static void mcore_setup_incoming_varargs (cumulative_args_t,
103 const function_arg_info &,
104 int *, int);
105 static cond_type is_cond_candidate (rtx);
106 static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
107 static rtx_insn *conditionalize_block (rtx_insn *);
108 static void conditionalize_optimization (void);
109 static void mcore_reorg (void);
110 static rtx handle_structs_in_regs (machine_mode, const_tree, int);
111 static void mcore_mark_dllexport (tree);
112 static void mcore_mark_dllimport (tree);
113 static int mcore_dllexport_p (tree);
114 static int mcore_dllimport_p (tree);
115 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
116 #ifdef OBJECT_FORMAT_ELF
117 static void mcore_asm_named_section (const char *,
118 unsigned int, tree);
119 #endif
120 static void mcore_print_operand (FILE *, rtx, int);
121 static void mcore_print_operand_address (FILE *, machine_mode, rtx);
122 static bool mcore_print_operand_punct_valid_p (unsigned char code);
123 static void mcore_unique_section (tree, int);
124 static void mcore_encode_section_info (tree, rtx, int);
125 static const char *mcore_strip_name_encoding (const char *);
126 static int mcore_const_costs (rtx, RTX_CODE);
127 static int mcore_and_cost (rtx);
128 static int mcore_ior_cost (rtx);
129 static bool mcore_rtx_costs (rtx, machine_mode, int, int,
130 int *, bool);
131 static void mcore_external_libcall (rtx);
132 static bool mcore_return_in_memory (const_tree, const_tree);
133 static int mcore_arg_partial_bytes (cumulative_args_t,
134 const function_arg_info &);
135 static rtx mcore_function_arg (cumulative_args_t,
136 const function_arg_info &);
137 static void mcore_function_arg_advance (cumulative_args_t,
138 machine_mode,
139 const_tree, bool);
140 static unsigned int mcore_function_arg_boundary (machine_mode,
141 const_tree);
142 static void mcore_asm_trampoline_template (FILE *);
143 static void mcore_trampoline_init (rtx, tree, rtx);
144 static bool mcore_warn_func_return (tree);
145 static void mcore_option_override (void);
146 static bool mcore_legitimate_constant_p (machine_mode, rtx);
147 static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
148 addr_space_t);
149 static bool mcore_hard_regno_mode_ok (unsigned int, machine_mode);
150 static bool mcore_modes_tieable_p (machine_mode, machine_mode);
151 \f
152 /* MCore specific attributes. */
153
154 static const struct attribute_spec mcore_attribute_table[] =
155 {
156 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
157 affects_type_identity, handler, exclude } */
158 { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
159 { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
160 { "naked", 0, 0, true, false, false, false,
161 mcore_handle_naked_attribute, NULL },
162 { NULL, 0, 0, false, false, false, false, NULL, NULL }
163 };
164 \f
165 /* Initialize the GCC target structure. */
166 #undef TARGET_ASM_EXTERNAL_LIBCALL
167 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
168
169 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
170 #undef TARGET_MERGE_DECL_ATTRIBUTES
171 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
172 #endif
173
174 #ifdef OBJECT_FORMAT_ELF
175 #undef TARGET_ASM_UNALIGNED_HI_OP
176 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
177 #undef TARGET_ASM_UNALIGNED_SI_OP
178 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
179 #endif
180
181 #undef TARGET_PRINT_OPERAND
182 #define TARGET_PRINT_OPERAND mcore_print_operand
183 #undef TARGET_PRINT_OPERAND_ADDRESS
184 #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
185 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
186 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
187
188 #undef TARGET_ATTRIBUTE_TABLE
189 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
190 #undef TARGET_ASM_UNIQUE_SECTION
191 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
192 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
193 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
194 #undef TARGET_ENCODE_SECTION_INFO
195 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
196 #undef TARGET_STRIP_NAME_ENCODING
197 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
198 #undef TARGET_RTX_COSTS
199 #define TARGET_RTX_COSTS mcore_rtx_costs
200 #undef TARGET_ADDRESS_COST
201 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
202 #undef TARGET_MACHINE_DEPENDENT_REORG
203 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
204
205 #undef TARGET_PROMOTE_FUNCTION_MODE
206 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
207 #undef TARGET_PROMOTE_PROTOTYPES
208 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
209
210 #undef TARGET_RETURN_IN_MEMORY
211 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
212 #undef TARGET_MUST_PASS_IN_STACK
213 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
214 #undef TARGET_PASS_BY_REFERENCE
215 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
216 #undef TARGET_ARG_PARTIAL_BYTES
217 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
218 #undef TARGET_FUNCTION_ARG
219 #define TARGET_FUNCTION_ARG mcore_function_arg
220 #undef TARGET_FUNCTION_ARG_ADVANCE
221 #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
222 #undef TARGET_FUNCTION_ARG_BOUNDARY
223 #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
224
225 #undef TARGET_SETUP_INCOMING_VARARGS
226 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
227
228 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
229 #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
230 #undef TARGET_TRAMPOLINE_INIT
231 #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
232
233 #undef TARGET_OPTION_OVERRIDE
234 #define TARGET_OPTION_OVERRIDE mcore_option_override
235
236 #undef TARGET_LEGITIMATE_CONSTANT_P
237 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
238 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
239 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
240
241 #undef TARGET_LRA_P
242 #define TARGET_LRA_P hook_bool_void_false
243
244 #undef TARGET_WARN_FUNC_RETURN
245 #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
246
247 #undef TARGET_HARD_REGNO_MODE_OK
248 #define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok
249
250 #undef TARGET_MODES_TIEABLE_P
251 #define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p
252
253 #undef TARGET_CONSTANT_ALIGNMENT
254 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
255
256 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
257 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
258
259 struct gcc_target targetm = TARGET_INITIALIZER;
260 \f
261 /* Adjust the stack by SIZE bytes; DIRECTION is negative to grow (allocate) the stack and positive to shrink it. */
262 static void
263 output_stack_adjust (int direction, int size)
264 {
265 /* If extending stack a lot, we do it incrementally. */
266 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
267 {
268 rtx tmp = gen_rtx_REG (SImode, 1);
269 rtx memref;
270
271 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
272 do
273 {
274 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
275 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
276 MEM_VOLATILE_P (memref) = 1;
277 emit_insn (gen_movsi (memref, stack_pointer_rtx));
278 size -= mcore_stack_increment;
279 }
280 while (size > mcore_stack_increment);
281
282 /* SIZE is now the residual for the last adjustment,
283 which doesn't require a probe. */
284 }
285
286 if (size)
287 {
288 rtx insn;
289 rtx val = GEN_INT (size);
290
291 if (size > 32)
292 {
293 rtx nval = gen_rtx_REG (SImode, 1);
294 emit_insn (gen_movsi (nval, val));
295 val = nval;
296 }
297
298 if (direction > 0)
299 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
300 else
301 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
302
303 emit_insn (insn);
304 }
305 }
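/* Worked example (added note, not part of the original source): assuming
   mcore_stack_increment is 4096, growing the stack by 10000 bytes emits two
   probed 4096-byte decrements (SIZE drops to 5904 and then 1808) followed by
   one final unprobed adjustment of the remaining 1808 bytes, which is first
   loaded into r1 since it exceeds 32.  */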
306
307 /* Work out the registers which need to be saved,
308 both as a mask and a count. */
309
310 static int
311 calc_live_regs (int * count)
312 {
313 int reg;
314 int live_regs_mask = 0;
315
316 * count = 0;
317
318 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
319 {
320 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
321 {
322 (*count)++;
323 live_regs_mask |= (1 << reg);
324 }
325 }
326
327 return live_regs_mask;
328 }
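/* Example (illustrative, added): if registers 8 and 9 are the only
   call-saved registers that are ever live, the returned mask is 0x300
   and *COUNT is set to 2.  */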
329
330 /* Print the operand address in x to the stream. */
331
332 static void
333 mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
334 {
335 switch (GET_CODE (x))
336 {
337 case REG:
338 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
339 break;
340
341 case PLUS:
342 {
343 rtx base = XEXP (x, 0);
344 rtx index = XEXP (x, 1);
345
346 if (GET_CODE (base) != REG)
347 {
348 /* Ensure that BASE is a register (one of them must be). */
349 rtx temp = base;
350 base = index;
351 index = temp;
352 }
353
354 switch (GET_CODE (index))
355 {
356 case CONST_INT:
357 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
358 reg_names[REGNO(base)], INTVAL (index));
359 break;
360
361 default:
362 gcc_unreachable ();
363 }
364 }
365
366 break;
367
368 default:
369 output_addr_const (stream, x);
370 break;
371 }
372 }
373
374 static bool
375 mcore_print_operand_punct_valid_p (unsigned char code)
376 {
377 return (code == '.' || code == '#' || code == '*' || code == '^'
378 || code == '!');
379 }
380
381 /* Print operand x (an rtx) in assembler syntax to file stream
382 according to modifier code.
383
384 'R' print the next register or memory location along, i.e. the lsw in
385 a double word value
386 'O' print a constant without the #
387 'M' print a constant as its negative
388 'P' print log2 of a power of two
389 'Q' print log2 of an inverse of a power of two
390 'U' print register for ldm/stm instruction
391 'X' print byte number for xtrbN instruction. */
392
393 static void
394 mcore_print_operand (FILE * stream, rtx x, int code)
395 {
396 switch (code)
397 {
398 case 'N':
399 if (INTVAL(x) == -1)
400 fprintf (asm_out_file, "32");
401 else
402 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
403 break;
404 case 'P':
405 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
406 break;
407 case 'Q':
408 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
409 break;
410 case 'O':
411 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
412 break;
413 case 'M':
414 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
415 break;
416 case 'R':
417 /* Next location along in memory or register. */
418 switch (GET_CODE (x))
419 {
420 case REG:
421 fputs (reg_names[REGNO (x) + 1], (stream));
422 break;
423 case MEM:
424 mcore_print_operand_address
425 (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
426 break;
427 default:
428 gcc_unreachable ();
429 }
430 break;
431 case 'U':
432 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
433 reg_names[REGNO (x) + 3]);
434 break;
435 case 'x':
436 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
437 break;
438 case 'X':
439 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
440 break;
441
442 default:
443 switch (GET_CODE (x))
444 {
445 case REG:
446 fputs (reg_names[REGNO (x)], (stream));
447 break;
448 case MEM:
449 output_address (GET_MODE (x), XEXP (x, 0));
450 break;
451 default:
452 output_addr_const (stream, x);
453 break;
454 }
455 break;
456 }
457 }
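/* Examples of the modifiers above (added for clarity): for a CONST_INT
   operand of 8, '%P' prints 3 (its log2); for 7, '%N' prints 3 (log2 of
   7 + 1, and 32 for -1); for 5, '%M' prints -5; and for 8, '%X' prints 2
   (3 - 8/8, the byte number for xtrb).  Note that the switch also handles
   'N' and 'x' (hex), which the list in the comment above omits.  */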
458
459 /* What does a constant cost ? */
460
461 static int
462 mcore_const_costs (rtx exp, enum rtx_code code)
463 {
464 HOST_WIDE_INT val = INTVAL (exp);
465
466 /* Easy constants. */
467 if ( CONST_OK_FOR_I (val)
468 || CONST_OK_FOR_M (val)
469 || CONST_OK_FOR_N (val)
470 || (code == PLUS && CONST_OK_FOR_L (val)))
471 return 1;
472 else if (code == AND
473 && ( CONST_OK_FOR_M (~val)
474 || CONST_OK_FOR_N (~val)))
475 return 2;
476 else if (code == PLUS
477 && ( CONST_OK_FOR_I (-val)
478 || CONST_OK_FOR_M (-val)
479 || CONST_OK_FOR_N (-val)))
480 return 2;
481
482 return 5;
483 }
484
485 /* What does an and instruction cost?  We do this because immediates may
486 have been relaxed.  We want to ensure that CSE will CSE relaxed immediates
487 out.  Otherwise we'll get bad code (multiple reloads of the same const). */
488
489 static int
490 mcore_and_cost (rtx x)
491 {
492 HOST_WIDE_INT val;
493
494 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
495 return 2;
496
497 val = INTVAL (XEXP (x, 1));
498
499 /* Do it directly. */
500 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
501 return 2;
502 /* Takes one instruction to load. */
503 else if (const_ok_for_mcore (val))
504 return 3;
505 /* Takes two instructions to load. */
506 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
507 return 4;
508
509 /* Takes a lrw to load. */
510 return 5;
511 }
512
513 /* What does an or cost - see mcore_and_cost () above. */
514
515 static int
516 mcore_ior_cost (rtx x)
517 {
518 HOST_WIDE_INT val;
519
520 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
521 return 2;
522
523 val = INTVAL (XEXP (x, 1));
524
525 /* Do it directly with bclri. */
526 if (CONST_OK_FOR_M (val))
527 return 2;
528 /* Takes one instruction to load. */
529 else if (const_ok_for_mcore (val))
530 return 3;
531 /* Takes two instructions to load. */
532 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
533 return 4;
534
535 /* Takes a lrw to load. */
536 return 5;
537 }
538
539 static bool
540 mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
541 int opno ATTRIBUTE_UNUSED,
542 int * total, bool speed ATTRIBUTE_UNUSED)
543 {
544 int code = GET_CODE (x);
545
546 switch (code)
547 {
548 case CONST_INT:
549 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
550 return true;
551 case CONST:
552 case LABEL_REF:
553 case SYMBOL_REF:
554 *total = 5;
555 return true;
556 case CONST_DOUBLE:
557 *total = 10;
558 return true;
559
560 case AND:
561 *total = COSTS_N_INSNS (mcore_and_cost (x));
562 return true;
563
564 case IOR:
565 *total = COSTS_N_INSNS (mcore_ior_cost (x));
566 return true;
567
568 case DIV:
569 case UDIV:
570 case MOD:
571 case UMOD:
572 case FLOAT:
573 case FIX:
574 *total = COSTS_N_INSNS (100);
575 return true;
576
577 default:
578 return false;
579 }
580 }
581
582 /* Prepare the operands for a comparison.  Return true if the branch/setcc
583 should use the inverted sense of the comparison that is emitted. */
584
585 bool
586 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
587 {
588 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
589 bool invert;
590
591 if (GET_CODE (op1) == CONST_INT)
592 {
593 HOST_WIDE_INT val = INTVAL (op1);
594
595 switch (code)
596 {
597 case GTU:
598 /* Unsigned > 0 is the same as != 0; everything else is converted
599 below to LEU (reversed cmphs). */
600 if (val == 0)
601 code = NE;
602 break;
603
604 /* Check whether (LE A imm) can become (LT A imm + 1),
605 or (GT A imm) can become (GE A imm + 1). */
606 case GT:
607 case LE:
608 if (CONST_OK_FOR_J (val + 1))
609 {
610 op1 = GEN_INT (val + 1);
611 code = code == LE ? LT : GE;
612 }
613 break;
614
615 default:
616 break;
617 }
618 }
619
620 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
621 op1 = force_reg (SImode, op1);
622
623 /* cmpnei: 0-31 (K immediate)
624 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
625 invert = false;
626 switch (code)
627 {
628 case EQ: /* Use inverted condition, cmpne. */
629 code = NE;
630 invert = true;
631 /* FALLTHRU */
632
633 case NE: /* Use normal condition, cmpne. */
634 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
635 op1 = force_reg (SImode, op1);
636 break;
637
638 case LE: /* Use inverted condition, reversed cmplt. */
639 code = GT;
640 invert = true;
641 /* FALLTHRU */
642
643 case GT: /* Use normal condition, reversed cmplt. */
644 if (GET_CODE (op1) == CONST_INT)
645 op1 = force_reg (SImode, op1);
646 break;
647
648 case GE: /* Use inverted condition, cmplt. */
649 code = LT;
650 invert = true;
651 /* FALLTHRU */
652
653 case LT: /* Use normal condition, cmplt. */
654 if (GET_CODE (op1) == CONST_INT &&
655 /* covered by btsti x,31. */
656 INTVAL (op1) != 0 &&
657 ! CONST_OK_FOR_J (INTVAL (op1)))
658 op1 = force_reg (SImode, op1);
659 break;
660
661 case GTU: /* Use inverted condition, cmple. */
662 /* We coped with unsigned > 0 above. */
663 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
664 code = LEU;
665 invert = true;
666 /* FALLTHRU */
667
668 case LEU: /* Use normal condition, reversed cmphs. */
669 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
670 op1 = force_reg (SImode, op1);
671 break;
672
673 case LTU: /* Use inverted condition, cmphs. */
674 code = GEU;
675 invert = true;
676 /* FALLTHRU */
677
678 case GEU: /* Use normal condition, cmphs. */
679 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
680 op1 = force_reg (SImode, op1);
681 break;
682
683 default:
684 break;
685 }
686
687 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
688 return invert;
689 }
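/* Example (illustrative, added): for (GT r2 10), CONST_OK_FOR_J (11)
   holds, so the comparison is first rewritten to (GE r2 11); GE is then
   handled by emitting the inverted cmplt form (roughly "cmplti r2,11"),
   and the function returns true so the caller tests the opposite sense
   of the condition bit.  */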
690
691 int
692 mcore_symbolic_address_p (rtx x)
693 {
694 switch (GET_CODE (x))
695 {
696 case SYMBOL_REF:
697 case LABEL_REF:
698 return 1;
699 case CONST:
700 x = XEXP (x, 0);
701 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
702 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
703 && GET_CODE (XEXP (x, 1)) == CONST_INT);
704 default:
705 return 0;
706 }
707 }
708
709 /* Functions to output assembly code for a function call. */
710
711 char *
712 mcore_output_call (rtx operands[], int index)
713 {
714 static char buffer[20];
715 rtx addr = operands [index];
716
717 if (REG_P (addr))
718 {
719 if (TARGET_CG_DATA)
720 {
721 gcc_assert (mcore_current_function_name);
722
723 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
724 "unknown", 1);
725 }
726
727 sprintf (buffer, "jsr\t%%%d", index);
728 }
729 else
730 {
731 if (TARGET_CG_DATA)
732 {
733 gcc_assert (mcore_current_function_name);
734 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
735
736 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
737 XSTR (addr, 0), 0);
738 }
739
740 sprintf (buffer, "jbsr\t%%%d", index);
741 }
742
743 return buffer;
744 }
745
746 /* Can we load a constant with a single instruction ? */
747
748 int
749 const_ok_for_mcore (HOST_WIDE_INT value)
750 {
751 if (value >= 0 && value <= 127)
752 return 1;
753
754 /* Try exact power of two. */
755 if (CONST_OK_FOR_M (value))
756 return 1;
757
758 /* Try exact power of two - 1. */
759 if (CONST_OK_FOR_N (value) && value != -1)
760 return 1;
761
762 return 0;
763 }
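/* Examples (added for clarity): 64 (movi range), 4096 (an exact power of
   two) and 255 (a power of two minus one) are all loadable in a single
   instruction, whereas 200 is not.  */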
764
765 /* Can we load a constant inline with up to 2 instructions ? */
766
767 int
768 mcore_const_ok_for_inline (HOST_WIDE_INT value)
769 {
770 HOST_WIDE_INT x, y;
771
772 return try_constant_tricks (value, & x, & y) > 0;
773 }
774
775 /* Are we loading the constant using a not ? */
776
777 int
778 mcore_const_trick_uses_not (HOST_WIDE_INT value)
779 {
780 HOST_WIDE_INT x, y;
781
782 return try_constant_tricks (value, & x, & y) == 2;
783 }
784
785 /* Try tricks to load a constant inline; return the trick number on
786 success (0 means the value is not inlinable).
787
788 0: not inlinable
789 1: single instruction (do the usual thing)
790 2: single insn followed by a 'not'
791 3: single insn followed by a subi
792 4: single insn followed by an addi
793 5: single insn followed by rsubi
794 6: single insn followed by bseti
795 7: single insn followed by bclri
796 8: single insn followed by rotli
797 9: single insn followed by lsli
798 10: single insn followed by ixh
799 11: single insn followed by ixw. */
800
801 static int
802 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
803 {
804 HOST_WIDE_INT i;
805 unsigned HOST_WIDE_INT bit, shf, rot;
806
807 if (const_ok_for_mcore (value))
808 return 1; /* Do the usual thing. */
809
810 if (! TARGET_HARDLIT)
811 return 0;
812
813 if (const_ok_for_mcore (~value))
814 {
815 *x = ~value;
816 return 2;
817 }
818
819 for (i = 1; i <= 32; i++)
820 {
821 if (const_ok_for_mcore (value - i))
822 {
823 *x = value - i;
824 *y = i;
825
826 return 3;
827 }
828
829 if (const_ok_for_mcore (value + i))
830 {
831 *x = value + i;
832 *y = i;
833
834 return 4;
835 }
836 }
837
838 bit = 0x80000000ULL;
839
840 for (i = 0; i <= 31; i++)
841 {
842 if (const_ok_for_mcore (i - value))
843 {
844 *x = i - value;
845 *y = i;
846
847 return 5;
848 }
849
850 if (const_ok_for_mcore (value & ~bit))
851 {
852 *y = bit;
853 *x = value & ~bit;
854 return 6;
855 }
856
857 if (const_ok_for_mcore (value | bit))
858 {
859 *y = ~bit;
860 *x = value | bit;
861
862 return 7;
863 }
864
865 bit >>= 1;
866 }
867
868 shf = value;
869 rot = value;
870
871 for (i = 1; i < 31; i++)
872 {
873 int c;
874
875 /* MCore has rotate left. */
876 c = rot << 31;
877 rot >>= 1;
878 rot &= 0x7FFFFFFF;
879 rot |= c; /* Simulate rotate. */
880
881 if (const_ok_for_mcore (rot))
882 {
883 *y = i;
884 *x = rot;
885
886 return 8;
887 }
888
889 if (shf & 1)
890 shf = 0; /* Can't use logical shift, low order bit is one. */
891
892 shf >>= 1;
893
894 if (shf != 0 && const_ok_for_mcore (shf))
895 {
896 *y = i;
897 *x = shf;
898
899 return 9;
900 }
901 }
902
903 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
904 {
905 *x = value / 3;
906
907 return 10;
908 }
909
910 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
911 {
912 *x = value / 5;
913
914 return 11;
915 }
916
917 return 0;
918 }
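/* Illustrative example (added, not from the original source): for
   VALUE == 0x3f80 none of the single-instruction, add/sub or single-bit
   tricks apply, but rotating right by 7 yields 127, so the routine
   returns 8 with *X = 127 and *Y = 7; the caller can then emit
   "movi rN,127" followed by "rotli rN,7".  */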
919
920 /* Check whether REG is dead at insn FIRST.  This is done by searching ahead
921 for either the next use (i.e., reg is live), a death note, or a set of
922 reg. Don't just use dead_or_set_p() since reload does not always mark
923 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
924 can ignore subregs by extracting the actual register. BRC */
925
926 int
927 mcore_is_dead (rtx_insn *first, rtx reg)
928 {
929 rtx_insn *insn;
930
931 /* For mcore, subregs can't live independently of their parent regs. */
932 if (GET_CODE (reg) == SUBREG)
933 reg = SUBREG_REG (reg);
934
935 /* Dies immediately. */
936 if (dead_or_set_p (first, reg))
937 return 1;
938
939 /* Look for conclusive evidence of live/death, otherwise we have
940 to assume that it is live. */
941 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
942 {
943 if (JUMP_P (insn))
944 return 0; /* We lose track, assume it is alive. */
945
946 else if (CALL_P (insn))
947 {
948 /* Calls might use it for the target or for register parms. */
949 if (reg_referenced_p (reg, PATTERN (insn))
950 || find_reg_fusage (insn, USE, reg))
951 return 0;
952 else if (dead_or_set_p (insn, reg))
953 return 1;
954 }
955 else if (NONJUMP_INSN_P (insn))
956 {
957 if (reg_referenced_p (reg, PATTERN (insn)))
958 return 0;
959 else if (dead_or_set_p (insn, reg))
960 return 1;
961 }
962 }
963
964 /* No conclusive evidence either way, we cannot take the chance
965 that control flow hid the use from us -- "I'm not dead yet". */
966 return 0;
967 }
968
969 /* Count the number of ones in mask. */
970
971 int
972 mcore_num_ones (HOST_WIDE_INT mask)
973 {
974 /* A trick to count set bits recently posted on comp.compilers. */
975 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
976 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
977 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
978 mask = ((mask >> 8) + mask);
979
980 return (mask + (mask >> 16)) & 0xff;
981 }
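/* Added note: this is the classic SWAR population count -- each step adds
   adjacent 1-, 2-, 4-, 8- and then 16-bit fields in parallel.  For example,
   MASK == 0xf0f0 yields 8.  */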
982
983 /* Count the number of zeros in mask. */
984
985 int
986 mcore_num_zeros (HOST_WIDE_INT mask)
987 {
988 return 32 - mcore_num_ones (mask);
989 }
990
991 /* Determine byte being masked. */
992
993 int
994 mcore_byte_offset (unsigned int mask)
995 {
996 if (mask == 0x00ffffffL)
997 return 0;
998 else if (mask == 0xff00ffffL)
999 return 1;
1000 else if (mask == 0xffff00ffL)
1001 return 2;
1002 else if (mask == 0xffffff00L)
1003 return 3;
1004
1005 return -1;
1006 }
1007
1008 /* Determine halfword being masked. */
1009
1010 int
1011 mcore_halfword_offset (unsigned int mask)
1012 {
1013 if (mask == 0x0000ffffL)
1014 return 0;
1015 else if (mask == 0xffff0000L)
1016 return 1;
1017
1018 return -1;
1019 }
1020
1021 /* Output a series of bseti's corresponding to mask. */
1022
1023 const char *
1024 mcore_output_bseti (rtx dst, int mask)
1025 {
1026 rtx out_operands[2];
1027 int bit;
1028
1029 out_operands[0] = dst;
1030
1031 for (bit = 0; bit < 32; bit++)
1032 {
1033 if ((mask & 0x1) == 0x1)
1034 {
1035 out_operands[1] = GEN_INT (bit);
1036
1037 output_asm_insn ("bseti\t%0,%1", out_operands);
1038 }
1039 mask >>= 1;
1040 }
1041
1042 return "";
1043 }
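/* Example (illustrative, added): a MASK of 0x21 emits "bseti %0,0"
   followed by "bseti %0,5".  */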
1044
1045 /* Output a series of bclri's corresponding to mask. */
1046
1047 const char *
1048 mcore_output_bclri (rtx dst, int mask)
1049 {
1050 rtx out_operands[2];
1051 int bit;
1052
1053 out_operands[0] = dst;
1054
1055 for (bit = 0; bit < 32; bit++)
1056 {
1057 if ((mask & 0x1) == 0x0)
1058 {
1059 out_operands[1] = GEN_INT (bit);
1060
1061 output_asm_insn ("bclri\t%0,%1", out_operands);
1062 }
1063
1064 mask >>= 1;
1065 }
1066
1067 return "";
1068 }
1069
1070 /* Output a conditional move of two constants that are within +/- 1 of
1071 each other.  See the "movtK" patterns in mcore.md.  I'm not sure this
1072 is really worth the effort. */
1073
1074 const char *
1075 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1076 {
1077 HOST_WIDE_INT load_value;
1078 HOST_WIDE_INT adjust_value;
1079 rtx out_operands[4];
1080
1081 out_operands[0] = operands[0];
1082
1083 /* Check to see which constant is loadable. */
1084 if (const_ok_for_mcore (INTVAL (operands[1])))
1085 {
1086 out_operands[1] = operands[1];
1087 out_operands[2] = operands[2];
1088 }
1089 else if (const_ok_for_mcore (INTVAL (operands[2])))
1090 {
1091 out_operands[1] = operands[2];
1092 out_operands[2] = operands[1];
1093
1094 /* Complement test since constants are swapped. */
1095 cmp_t = (cmp_t == 0);
1096 }
1097 load_value = INTVAL (out_operands[1]);
1098 adjust_value = INTVAL (out_operands[2]);
1099
1100 /* First output the test if folded into the pattern. */
1101
1102 if (test)
1103 output_asm_insn (test, operands);
1104
1105 /* Load the constant - for now, only support constants that can be
1106 generated with a single instruction.  Maybe add general inlinable
1107 constants later (this will increase the # of patterns since the
1108 instruction sequence has a different length attribute). */
1109 if (load_value >= 0 && load_value <= 127)
1110 output_asm_insn ("movi\t%0,%1", out_operands);
1111 else if (CONST_OK_FOR_M (load_value))
1112 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1113 else if (CONST_OK_FOR_N (load_value))
1114 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1115
1116 /* Output the constant adjustment. */
1117 if (load_value > adjust_value)
1118 {
1119 if (cmp_t)
1120 output_asm_insn ("decf\t%0", out_operands);
1121 else
1122 output_asm_insn ("dect\t%0", out_operands);
1123 }
1124 else
1125 {
1126 if (cmp_t)
1127 output_asm_insn ("incf\t%0", out_operands);
1128 else
1129 output_asm_insn ("inct\t%0", out_operands);
1130 }
1131
1132 return "";
1133 }
1134
1135 /* Outputs the peephole for moving a constant that gets not'ed followed
1136 by an and (i.e. combine the not and the and into andn). BRC */
1137
1138 const char *
1139 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1140 {
1141 HOST_WIDE_INT x, y;
1142 rtx out_operands[3];
1143 const char * load_op;
1144 char buf[256];
1145 int trick_no;
1146
1147 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1148 gcc_assert (trick_no == 2);
1149
1150 out_operands[0] = operands[0];
1151 out_operands[1] = GEN_INT (x);
1152 out_operands[2] = operands[2];
1153
1154 if (x >= 0 && x <= 127)
1155 load_op = "movi\t%0,%1";
1156
1157 /* Try exact power of two. */
1158 else if (CONST_OK_FOR_M (x))
1159 load_op = "bgeni\t%0,%P1";
1160
1161 /* Try exact power of two - 1. */
1162 else if (CONST_OK_FOR_N (x))
1163 load_op = "bmaski\t%0,%N1";
1164
1165 else
1166 {
1167 load_op = "BADMOVI-andn\t%0, %1";
1168 gcc_unreachable ();
1169 }
1170
1171 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1172 output_asm_insn (buf, out_operands);
1173
1174 return "";
1175 }
1176
1177 /* Output an inline constant. */
1178
1179 static const char *
1180 output_inline_const (machine_mode mode, rtx operands[])
1181 {
1182 HOST_WIDE_INT x = 0, y = 0;
1183 int trick_no;
1184 rtx out_operands[3];
1185 char buf[256];
1186 char load_op[256];
1187 const char *dst_fmt;
1188 HOST_WIDE_INT value;
1189
1190 value = INTVAL (operands[1]);
1191
1192 trick_no = try_constant_tricks (value, &x, &y);
1193 /* lrw's are handled separately: Large inlinable constants never get
1194 turned into lrw's. Our caller uses try_constant_tricks to back
1195 off to an lrw rather than calling this routine. */
1196 gcc_assert (trick_no != 0);
1197
1198 if (trick_no == 1)
1199 x = value;
1200
1201 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1202 out_operands[0] = operands[0];
1203 out_operands[1] = GEN_INT (x);
1204
1205 if (trick_no > 2)
1206 out_operands[2] = GEN_INT (y);
1207
1208 /* Select dst format based on mode. */
1209 if (mode == DImode && (! TARGET_LITTLE_END))
1210 dst_fmt = "%R0";
1211 else
1212 dst_fmt = "%0";
1213
1214 if (x >= 0 && x <= 127)
1215 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1216
1217 /* Try exact power of two. */
1218 else if (CONST_OK_FOR_M (x))
1219 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1220
1221 /* Try exact power of two - 1. */
1222 else if (CONST_OK_FOR_N (x))
1223 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1224
1225 else
1226 {
1227 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1228 gcc_unreachable ();
1229 }
1230
1231 switch (trick_no)
1232 {
1233 case 1:
1234 strcpy (buf, load_op);
1235 break;
1236 case 2: /* not */
1237 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1238 break;
1239 case 3: /* add */
1240 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1241 break;
1242 case 4: /* sub */
1243 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1244 break;
1245 case 5: /* rsub */
1246 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1247 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1248 break;
1249 case 6: /* bseti */
1250 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1251 break;
1252 case 7: /* bclr */
1253 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1254 break;
1255 case 8: /* rotl */
1256 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1257 break;
1258 case 9: /* lsl */
1259 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1260 break;
1261 case 10: /* ixh */
1262 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1263 break;
1264 case 11: /* ixw */
1265 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1266 break;
1267 default:
1268 return "";
1269 }
1270
1271 output_asm_insn (buf, out_operands);
1272
1273 return "";
1274 }
1275
1276 /* Output a move of a word or less value. */
1277
1278 const char *
1279 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1280 machine_mode mode ATTRIBUTE_UNUSED)
1281 {
1282 rtx dst = operands[0];
1283 rtx src = operands[1];
1284
1285 if (GET_CODE (dst) == REG)
1286 {
1287 if (GET_CODE (src) == REG)
1288 {
1289 if (REGNO (src) == CC_REG) /* r-c */
1290 return "mvc\t%0";
1291 else
1292 return "mov\t%0,%1"; /* r-r*/
1293 }
1294 else if (GET_CODE (src) == MEM)
1295 {
1296 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1297 return "lrw\t%0,[%1]"; /* a-R */
1298 else
1299 switch (GET_MODE (src)) /* r-m */
1300 {
1301 case E_SImode:
1302 return "ldw\t%0,%1";
1303 case E_HImode:
1304 return "ld.h\t%0,%1";
1305 case E_QImode:
1306 return "ld.b\t%0,%1";
1307 default:
1308 gcc_unreachable ();
1309 }
1310 }
1311 else if (GET_CODE (src) == CONST_INT)
1312 {
1313 HOST_WIDE_INT x, y;
1314
1315 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1316 return "movi\t%0,%1";
1317 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1318 return "bgeni\t%0,%P1\t// %1 %x1";
1319 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1320 return "bmaski\t%0,%N1\t// %1 %x1";
1321 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1322 return output_inline_const (SImode, operands); /* 1-2 insns */
1323 else
1324 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1325 }
1326 else
1327 return "lrw\t%0, %1"; /* Into the literal pool. */
1328 }
1329 else if (GET_CODE (dst) == MEM) /* m-r */
1330 switch (GET_MODE (dst))
1331 {
1332 case E_SImode:
1333 return "stw\t%1,%0";
1334 case E_HImode:
1335 return "st.h\t%1,%0";
1336 case E_QImode:
1337 return "st.b\t%1,%0";
1338 default:
1339 gcc_unreachable ();
1340 }
1341
1342 gcc_unreachable ();
1343 }
1344
1345 /* Return a sequence of instructions to perform DI or DF move.
1346 Since the MCORE cannot move a DI or DF in one instruction, we have
1347 to take care when we see overlapping source and dest registers. */
1348
1349 const char *
1350 mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1351 {
1352 rtx dst = operands[0];
1353 rtx src = operands[1];
1354
1355 if (GET_CODE (dst) == REG)
1356 {
1357 if (GET_CODE (src) == REG)
1358 {
1359 int dstreg = REGNO (dst);
1360 int srcreg = REGNO (src);
1361
1362 /* Ensure the second source is not overwritten. */
1363 if (srcreg + 1 == dstreg)
1364 return "mov %R0,%R1\n\tmov %0,%1";
1365 else
1366 return "mov %0,%1\n\tmov %R0,%R1";
1367 }
1368 else if (GET_CODE (src) == MEM)
1369 {
1370 rtx memexp = XEXP (src, 0);
1371 int dstreg = REGNO (dst);
1372 int basereg = -1;
1373
1374 if (GET_CODE (memexp) == LABEL_REF)
1375 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1376 else if (GET_CODE (memexp) == REG)
1377 basereg = REGNO (memexp);
1378 else if (GET_CODE (memexp) == PLUS)
1379 {
1380 if (GET_CODE (XEXP (memexp, 0)) == REG)
1381 basereg = REGNO (XEXP (memexp, 0));
1382 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1383 basereg = REGNO (XEXP (memexp, 1));
1384 else
1385 gcc_unreachable ();
1386 }
1387 else
1388 gcc_unreachable ();
1389
1390 /* ??? length attribute is wrong here. */
1391 if (dstreg == basereg)
1392 {
1393 /* Just load them in reverse order. */
1394 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1395
1396 /* XXX: alternative: move basereg to basereg+1
1397 and then fall through. */
1398 }
1399 else
1400 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1401 }
1402 else if (GET_CODE (src) == CONST_INT)
1403 {
1404 if (TARGET_LITTLE_END)
1405 {
1406 if (CONST_OK_FOR_I (INTVAL (src)))
1407 output_asm_insn ("movi %0,%1", operands);
1408 else if (CONST_OK_FOR_M (INTVAL (src)))
1409 output_asm_insn ("bgeni %0,%P1", operands);
1410 else if (CONST_OK_FOR_N (INTVAL (src)))
1411 output_asm_insn ("bmaski %0,%N1", operands);
1412 else
1413 gcc_unreachable ();
1414
1415 if (INTVAL (src) < 0)
1416 return "bmaski %R0,32";
1417 else
1418 return "movi %R0,0";
1419 }
1420 else
1421 {
1422 if (CONST_OK_FOR_I (INTVAL (src)))
1423 output_asm_insn ("movi %R0,%1", operands);
1424 else if (CONST_OK_FOR_M (INTVAL (src)))
1425 output_asm_insn ("bgeni %R0,%P1", operands);
1426 else if (CONST_OK_FOR_N (INTVAL (src)))
1427 output_asm_insn ("bmaski %R0,%N1", operands);
1428 else
1429 gcc_unreachable ();
1430
1431 if (INTVAL (src) < 0)
1432 return "bmaski %0,32";
1433 else
1434 return "movi %0,0";
1435 }
1436 }
1437 else
1438 gcc_unreachable ();
1439 }
1440 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1441 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1442 else
1443 gcc_unreachable ();
1444 }
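/* Overlap example (added for illustration): moving the register pair
   starting at r3 into the pair starting at r4 has SRCREG + 1 == DSTREG,
   so the %R half is copied first ("mov r5,r4" then "mov r4,r3") so that
   r4 is read before it is overwritten; non-overlapping pairs copy the
   %0/%1 half first.  */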
1445
1446 /* Predicates used by the templates. */
1447
1448 int
1449 mcore_arith_S_operand (rtx op)
1450 {
1451 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1452 return 1;
1453
1454 return 0;
1455 }
1456
1457 /* Expand insert bit field. BRC */
1458
1459 int
1460 mcore_expand_insv (rtx operands[])
1461 {
1462 int width = INTVAL (operands[1]);
1463 int posn = INTVAL (operands[2]);
1464 int mask;
1465 rtx mreg, sreg, ereg;
1466
1467 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1468 for width==1 must be removed. Look around line 368. This is something
1469 we really want the md part to do. */
1470 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1471 {
1472 /* Do directly with bseti or bclri. */
1473 /* RBE: 2/97 consider only low bit of constant. */
1474 if ((INTVAL (operands[3]) & 1) == 0)
1475 {
1476 mask = ~(1 << posn);
1477 emit_insn (gen_rtx_SET (operands[0],
1478 gen_rtx_AND (SImode, operands[0],
1479 GEN_INT (mask))));
1480 }
1481 else
1482 {
1483 mask = 1 << posn;
1484 emit_insn (gen_rtx_SET (operands[0],
1485 gen_rtx_IOR (SImode, operands[0],
1486 GEN_INT (mask))));
1487 }
1488
1489 return 1;
1490 }
1491
1492 /* Look at some bit-field placements that we aren't interested
1493 in handling ourselves, unless specifically directed to do so. */
1494 if (! TARGET_W_FIELD)
1495 return 0; /* Generally, give up about now. */
1496
1497 if (width == 8 && posn % 8 == 0)
1498 /* Byte sized and aligned; let caller break it up. */
1499 return 0;
1500
1501 if (width == 16 && posn % 16 == 0)
1502 /* Short sized and aligned; let caller break it up. */
1503 return 0;
1504
1505 /* The general case - we can do this a little bit better than what the
1506 machine independent part tries. This will get rid of all the subregs
1507 that mess up constant folding in combine when working with relaxed
1508 immediates. */
1509
1510 /* If setting the entire field, do it directly. */
1511 if (GET_CODE (operands[3]) == CONST_INT
1512 && INTVAL (operands[3]) == ((1 << width) - 1))
1513 {
1514 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1515 emit_insn (gen_rtx_SET (operands[0],
1516 gen_rtx_IOR (SImode, operands[0], mreg)));
1517 return 1;
1518 }
1519
1520 /* Generate the clear mask. */
1521 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1522
1523 /* Clear the field, to overlay it later with the source. */
1524 emit_insn (gen_rtx_SET (operands[0],
1525 gen_rtx_AND (SImode, operands[0], mreg)));
1526
1527 /* If the source is constant 0, we've nothing to add back. */
1528 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1529 return 1;
1530
1531 /* XXX: Should we worry about more games with constant values?
1532 We've covered the high profile: set/clear single-bit and many-bit
1533 fields. How often do we see "arbitrary bit pattern" constants? */
1534 sreg = copy_to_mode_reg (SImode, operands[3]);
1535
1536 /* Extract src as the same width as dst (needed for signed values).  We
1537 always have to do this since we widen everything to SImode.
1538 We don't have to mask if we're shifting this up against the
1539 MSB of the register (i.e. the shift will push out any hi-order
1540 bits). */
1541 if (width + posn != (int) GET_MODE_SIZE (SImode))
1542 {
1543 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1544 emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1545 }
1546
1547 /* Insert source value in dest. */
1548 if (posn != 0)
1549 emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1550 GEN_INT (posn))));
1551
1552 emit_insn (gen_rtx_SET (operands[0],
1553 gen_rtx_IOR (SImode, operands[0], sreg)));
1554
1555 return 1;
1556 }
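/* Example (illustrative, added): storing the constant 1 into a 1-bit
   field at bit position 5 takes the early exit above and emits a single
   IOR of the destination with 0x20 (a bseti); storing 0 there emits an
   AND with ~0x20 (a bclri).  */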
1557 \f
1558 /* ??? Block move stuff stolen from m88k. This code has not been
1559 verified for correctness. */
1560
1561 /* Emit code to perform a block move. Choose the best method.
1562
1563 OPERANDS[0] is the destination.
1564 OPERANDS[1] is the source.
1565 OPERANDS[2] is the size.
1566 OPERANDS[3] is the alignment safe to use. */
1567
1568 /* Emit code to perform a block move with an offset sequence of ldw/st
1569 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
1570 known constants.  DST_MEM and SRC_MEM are the memory references to copy
1571 between; their addresses are forced into registers below. */
1572
1573 static const machine_mode mode_from_align[] =
1574 {
1575 VOIDmode, QImode, HImode, VOIDmode, SImode,
1576 };
1577
1578 static void
1579 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1580 {
1581 rtx temp[2];
1582 machine_mode mode[2];
1583 int amount[2];
1584 bool active[2];
1585 int phase = 0;
1586 int next;
1587 int offset_ld = 0;
1588 int offset_st = 0;
1589 rtx x;
1590
1591 x = XEXP (dst_mem, 0);
1592 if (!REG_P (x))
1593 {
1594 x = force_reg (Pmode, x);
1595 dst_mem = replace_equiv_address (dst_mem, x);
1596 }
1597
1598 x = XEXP (src_mem, 0);
1599 if (!REG_P (x))
1600 {
1601 x = force_reg (Pmode, x);
1602 src_mem = replace_equiv_address (src_mem, x);
1603 }
1604
1605 active[0] = active[1] = false;
1606
1607 do
1608 {
1609 next = phase;
1610 phase ^= 1;
1611
1612 if (size > 0)
1613 {
1614 int next_amount;
1615
1616 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1617 next_amount = MIN (next_amount, align);
1618
1619 amount[next] = next_amount;
1620 mode[next] = mode_from_align[next_amount];
1621 temp[next] = gen_reg_rtx (mode[next]);
1622
1623 x = adjust_address (src_mem, mode[next], offset_ld);
1624 emit_insn (gen_rtx_SET (temp[next], x));
1625
1626 offset_ld += next_amount;
1627 size -= next_amount;
1628 active[next] = true;
1629 }
1630
1631 if (active[phase])
1632 {
1633 active[phase] = false;
1634
1635 x = adjust_address (dst_mem, mode[phase], offset_st);
1636 emit_insn (gen_rtx_SET (x, temp[phase]));
1637
1638 offset_st += amount[phase];
1639 }
1640 }
1641 while (active[next]);
1642 }
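/* Added note (illustrative): the loop ping-pongs between two temporaries
   so that each store of one chunk overlaps the load of the next.  For a
   7-byte copy with ALIGN == 4 the chunks are 4, 2 and 1 bytes, giving
   ldw/ld.h/ld.b loads interleaved with the corresponding stw/st.h/st.b
   stores.  */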
1643
1644 bool
1645 mcore_expand_block_move (rtx *operands)
1646 {
1647 HOST_WIDE_INT align, bytes, max;
1648
1649 if (GET_CODE (operands[2]) != CONST_INT)
1650 return false;
1651
1652 bytes = INTVAL (operands[2]);
1653 align = INTVAL (operands[3]);
1654
1655 if (bytes <= 0)
1656 return false;
1657 if (align > 4)
1658 align = 4;
1659
1660 switch (align)
1661 {
1662 case 4:
1663 if (bytes & 1)
1664 max = 4*4;
1665 else if (bytes & 3)
1666 max = 8*4;
1667 else
1668 max = 16*4;
1669 break;
1670 case 2:
1671 max = 4*2;
1672 break;
1673 case 1:
1674 max = 4*1;
1675 break;
1676 default:
1677 gcc_unreachable ();
1678 }
1679
1680 if (bytes <= max)
1681 {
1682 block_move_sequence (operands[0], operands[1], bytes, align);
1683 return true;
1684 }
1685
1686 return false;
1687 }
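/* Example thresholds (added for clarity): with 4-byte alignment the inline
   expansion is used for copies of up to 16 bytes when the length is odd,
   up to 32 bytes when it is even but not a multiple of 4, and up to 64
   bytes when it is a multiple of 4; 2- and 1-byte alignment cap the inline
   copy at 8 and 4 bytes respectively.  */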
1688 \f
1689
1690 /* Code to generate prologue and epilogue sequences. */
1691 static int number_of_regs_before_varargs;
1692
1693 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1694 for a varargs function. */
1695 static int current_function_anonymous_args;
1696
1697 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1698 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1699 #define ADDI_REACH (32) /* Maximum addi operand. */
1700
1701 static void
1702 layout_mcore_frame (struct mcore_frame * infp)
1703 {
1704 int n;
1705 unsigned int i;
1706 int nbytes;
1707 int regarg;
1708 int localregarg;
1709 int outbounds;
1710 unsigned int growths;
1711 int step;
1712
1713 /* Might have to spill bytes to re-assemble a big argument that
1714 was passed partially in registers and partially on the stack. */
1715 nbytes = crtl->args.pretend_args_size;
1716
1717 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1718 if (current_function_anonymous_args)
1719 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1720
1721 infp->arg_size = nbytes;
1722
1723 /* How much space to save non-volatile registers we stomp. */
1724 infp->reg_mask = calc_live_regs (& n);
1725 infp->reg_size = n * 4;
1726
1727 /* And the rest of it... locals and space for overflowed outbounds. */
1728 infp->local_size = get_frame_size ();
1729 infp->outbound_size = crtl->outgoing_args_size;
1730
1731 /* Make sure we have a whole number of words for the locals. */
1732 if (infp->local_size % STACK_BYTES)
1733 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1734
1735 /* Only thing we know we have to pad is the outbound space, since
1736 we've aligned our locals assuming that base of locals is aligned. */
1737 infp->pad_local = 0;
1738 infp->pad_reg = 0;
1739 infp->pad_outbound = 0;
1740 if (infp->outbound_size % STACK_BYTES)
1741 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1742
1743 /* Now we see how we want to stage the prologue so that it does
1744 the most appropriate stack growth and register saves to either:
1745 (1) run fast,
1746 (2) reduce instruction space, or
1747 (3) reduce stack space. */
1748 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1749 infp->growth[i] = 0;
1750
1751 regarg = infp->reg_size + infp->arg_size;
1752 localregarg = infp->local_size + regarg;
1753 outbounds = infp->outbound_size + infp->pad_outbound;
1754 growths = 0;
1755
1756 /* XXX: Consider one where we consider localregarg + outbound too! */
1757
1758 /* If the frame is <= 32 bytes and an stm would cover <= 2 registers,
1759 use stw's with offsets and buy the frame in one shot. */
1760 if (localregarg <= ADDI_REACH
1761 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1762 {
1763 /* Make sure we'll be aligned. */
1764 if (localregarg % STACK_BYTES)
1765 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1766
1767 step = localregarg + infp->pad_reg;
1768 infp->reg_offset = infp->local_size;
1769
1770 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1771 {
1772 step += outbounds;
1773 infp->reg_offset += outbounds;
1774 outbounds = 0;
1775 }
1776
1777 infp->arg_offset = step - 4;
1778 infp->growth[growths++] = step;
1779 infp->reg_growth = growths;
1780 infp->local_growth = growths;
1781
1782 /* If we haven't already folded it in. */
1783 if (outbounds)
1784 infp->growth[growths++] = outbounds;
1785
1786 goto finish;
1787 }
1788
1789 /* Frame can't be done with a single subi, but can be done with 2
1790 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1791 shift some of the stack purchase into the first subi, so both are
1792 single instructions. */
1793 if (localregarg <= STORE_REACH
1794 && (infp->local_size > ADDI_REACH)
1795 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1796 {
1797 int all;
1798
1799 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1800 if (localregarg % STACK_BYTES)
1801 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1802
1803 all = localregarg + infp->pad_reg + infp->pad_local;
1804 step = ADDI_REACH; /* As much up front as we can. */
1805 if (step > all)
1806 step = all;
1807
1808 /* XXX: Consider whether step will still be aligned; we believe so. */
1809 infp->arg_offset = step - 4;
1810 infp->growth[growths++] = step;
1811 infp->reg_growth = growths;
1812 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1813 all -= step;
1814
1815 /* Can we fold in any space required for outbounds? */
1816 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1817 {
1818 all += outbounds;
1819 outbounds = 0;
1820 }
1821
1822 /* Get the rest of the locals in place. */
1823 step = all;
1824 infp->growth[growths++] = step;
1825 infp->local_growth = growths;
1826 all -= step;
1827
1828 gcc_assert (all == 0);
1829
1830 /* Finish off if we need to do so. */
1831 if (outbounds)
1832 infp->growth[growths++] = outbounds;
1833
1834 goto finish;
1835 }
1836
1837 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1838 Then we buy the rest of the frame in 1 or 2 steps depending on
1839 whether we need a frame pointer. */
1840 if ((regarg % STACK_BYTES) == 0)
1841 {
1842 infp->growth[growths++] = regarg;
1843 infp->reg_growth = growths;
1844 infp->arg_offset = regarg - 4;
1845 infp->reg_offset = 0;
1846
1847 if (infp->local_size % STACK_BYTES)
1848 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1849
1850 step = infp->local_size + infp->pad_local;
1851
1852 if (!frame_pointer_needed)
1853 {
1854 step += outbounds;
1855 outbounds = 0;
1856 }
1857
1858 infp->growth[growths++] = step;
1859 infp->local_growth = growths;
1860
1861 /* If there's any left to be done. */
1862 if (outbounds)
1863 infp->growth[growths++] = outbounds;
1864
1865 goto finish;
1866 }
1867
1868 /* XXX: optimizations that we'll want to play with....
1869 -- regarg is not aligned, but it's a small number of registers;
1870 use some of localsize so that regarg is aligned and then
1871 save the registers. */
1872
1873 /* Simple encoding; plods down the stack buying the pieces as it goes.
1874 -- does not optimize space consumption.
1875 -- does not attempt to optimize instruction counts.
1876 -- but it is safe for all alignments. */
1877 if (regarg % STACK_BYTES != 0)
1878 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1879
1880 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1881 infp->reg_growth = growths;
1882 infp->arg_offset = infp->growth[0] - 4;
1883 infp->reg_offset = 0;
1884
1885 if (frame_pointer_needed)
1886 {
1887 if (infp->local_size % STACK_BYTES != 0)
1888 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1889
1890 infp->growth[growths++] = infp->local_size + infp->pad_local;
1891 infp->local_growth = growths;
1892
1893 infp->growth[growths++] = outbounds;
1894 }
1895 else
1896 {
1897 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1898 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1899
1900 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1901 infp->local_growth = growths;
1902 }
1903
1904 /* Anything else that we've forgotten?  Plus a few consistency checks. */
1905 finish:
1906 gcc_assert (infp->reg_offset >= 0);
1907 gcc_assert (growths <= MAX_STACK_GROWS);
1908
1909 for (i = 0; i < growths; i++)
1910 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1911 }
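/* Worked example (illustrative, not from the original source): a leaf
   function with 8 bytes of locals, two call-saved registers to spill, no
   stdarg spill area and no outgoing-argument overflow falls into the first
   case above: LOCALREGARG is 16, so a single growth step of 16 bytes buys
   the whole frame, REG_OFFSET is 8, and the prologue stores the two
   registers at sp+8 and sp+12.  */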
1912
1913 /* Define the offset between two registers, one to be eliminated, and
1914 the other its replacement, at the start of a routine. */
1915
1916 int
1917 mcore_initial_elimination_offset (int from, int to)
1918 {
1919 int above_frame;
1920 int below_frame;
1921 struct mcore_frame fi;
1922
1923 layout_mcore_frame (& fi);
1924
1925 /* fp to ap */
1926 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1927 /* sp to fp */
1928 below_frame = fi.outbound_size + fi.pad_outbound;
1929
1930 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1931 return above_frame;
1932
1933 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1934 return above_frame + below_frame;
1935
1936 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1937 return below_frame;
1938
1939 gcc_unreachable ();
1940 }
1941
1942 /* Keep track of some information about varargs for the prolog. */
1943
1944 static void
1945 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1946 const function_arg_info &arg,
1947 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1948 int second_time ATTRIBUTE_UNUSED)
1949 {
1950 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1951
1952 current_function_anonymous_args = 1;
1953
1954 /* We need to know how many argument registers are used before
1955 the varargs start, so that we can push the remaining argument
1956 registers during the prologue. */
1957 number_of_regs_before_varargs
1958 = *args_so_far + mcore_num_arg_regs (arg.mode, arg.type);
1959
1960 /* There is a bug somewhere in the arg handling code.
1961 Until I can find it, this workaround always pushes the
1962 last named argument onto the stack. */
1963 number_of_regs_before_varargs = *args_so_far;
1964
1965 /* The last named argument may be split between argument registers
1966 and the stack. Allow for this here. */
1967 if (number_of_regs_before_varargs > NPARM_REGS)
1968 number_of_regs_before_varargs = NPARM_REGS;
1969 }
1970
1971 void
1972 mcore_expand_prolog (void)
1973 {
1974 struct mcore_frame fi;
1975 int space_allocated = 0;
1976 int growth = 0;
1977
1978 /* Find out what we're doing. */
1979 layout_mcore_frame (&fi);
1980
1981 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1982 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1983
1984 if (TARGET_CG_DATA)
1985 {
1986 /* Emit a symbol for this routine's frame size. */
1987 rtx x;
1988
1989 x = DECL_RTL (current_function_decl);
1990
1991 gcc_assert (GET_CODE (x) == MEM);
1992
1993 x = XEXP (x, 0);
1994
1995 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1996
1997 free (mcore_current_function_name);
1998
1999 mcore_current_function_name = xstrdup (XSTR (x, 0));
2000
2001 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2002
2003 if (cfun->calls_alloca)
2004 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2005
2006 /* 970425: RBE:
2007 We're looking at how the 8byte alignment affects stack layout
2008 and where we had to pad things. This emits information we can
2009 extract which tells us about frame sizes and the like. */
2010 fprintf (asm_out_file,
2011 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2012 mcore_current_function_name,
2013 fi.arg_size, fi.reg_size, fi.reg_mask,
2014 fi.local_size, fi.outbound_size,
2015 frame_pointer_needed);
2016 }
2017
2018 if (mcore_naked_function_p ())
2019 return;
2020
2021 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2022 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2023
2024 /* If we have a parameter passed partially in regs and partially in memory,
2025 the registers will have been stored to memory already in function.c. So
2026 we only need to do something here for varargs functions. */
2027 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2028 {
2029 int offset;
2030 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2031 int remaining = fi.arg_size;
2032
2033 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2034 {
2035 emit_insn (gen_movsi
2036 (gen_rtx_MEM (SImode,
2037 plus_constant (Pmode, stack_pointer_rtx,
2038 offset)),
2039 gen_rtx_REG (SImode, rn)));
2040 }
2041 }
2042
2043 /* Do we need another stack adjustment before we do the register saves? */
2044 if (growth < fi.reg_growth)
2045 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2046
2047 if (fi.reg_size != 0)
2048 {
2049 int i;
2050 int offs = fi.reg_offset;
2051
2052 for (i = 15; i >= 0; i--)
2053 {
2054 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2055 {
2056 int first_reg = 15;
2057
2058 while (fi.reg_mask & (1 << first_reg))
2059 first_reg--;
2060 first_reg++;
2061
2062 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2063 gen_rtx_REG (SImode, first_reg),
2064 GEN_INT (16 - first_reg)));
2065
2066 i -= (15 - first_reg);
2067 offs += (16 - first_reg) * 4;
2068 }
2069 else if (fi.reg_mask & (1 << i))
2070 {
2071 emit_insn (gen_movsi
2072 (gen_rtx_MEM (SImode,
2073 plus_constant (Pmode, stack_pointer_rtx,
2074 offs)),
2075 gen_rtx_REG (SImode, i)));
2076 offs += 4;
2077 }
2078 }
2079 }
2080
2081 /* Figure the locals + outbounds. */
2082 if (frame_pointer_needed)
2083 {
2084 /* If we haven't already purchased to 'fp'. */
2085 if (growth < fi.local_growth)
2086 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2087
2088 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2089
2090 /* ... and then go any remaining distance for outbounds, etc. */
2091 if (fi.growth[growth])
2092 output_stack_adjust (-1, fi.growth[growth++]);
2093 }
2094 else
2095 {
2096 if (growth < fi.local_growth)
2097 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2098 if (fi.growth[growth])
2099 output_stack_adjust (-1, fi.growth[growth++]);
2100 }
2101 }
2102
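/* Expand RTL for the function epilogue: restore the stack pointer (from
the frame pointer when one was used), reload the callee-saved registers
and release the remainder of the frame. */
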
2103 void
2104 mcore_expand_epilog (void)
2105 {
2106 struct mcore_frame fi;
2107 int i;
2108 int offs;
2109 int growth = MAX_STACK_GROWS - 1;
2110
2111
2112 /* Find out what we're doing. */
2113 layout_mcore_frame (&fi);
2114
2115 if (mcore_naked_function_p ())
2116 return;
2117
2118 /* If we had a frame pointer, restore the sp from that. */
2119 if (frame_pointer_needed)
2120 {
2121 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2122 growth = fi.local_growth - 1;
2123 }
2124 else
2125 {
2126 /* XXX: while loop should accumulate and do a single sell. */
2127 while (growth >= fi.local_growth)
2128 {
2129 if (fi.growth[growth] != 0)
2130 output_stack_adjust (1, fi.growth[growth]);
2131 growth--;
2132 }
2133 }
2134
2135 /* Make sure we've shrunk stack back to the point where the registers
2136 were laid down. This is typically 0/1 iterations. Then pull the
2137 register save information back off the stack. */
2138 while (growth >= fi.reg_growth)
2139 output_stack_adjust ( 1, fi.growth[growth--]);
2140
2141 offs = fi.reg_offset;
2142
2143 for (i = 15; i >= 0; i--)
2144 {
2145 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2146 {
2147 int first_reg;
2148
2149 /* Find the starting register. */
2150 first_reg = 15;
2151
2152 while (fi.reg_mask & (1 << first_reg))
2153 first_reg--;
2154
2155 first_reg++;
2156
2157 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2158 gen_rtx_MEM (SImode, stack_pointer_rtx),
2159 GEN_INT (16 - first_reg)));
2160
2161 i -= (15 - first_reg);
2162 offs += (16 - first_reg) * 4;
2163 }
2164 else if (fi.reg_mask & (1 << i))
2165 {
2166 emit_insn (gen_movsi
2167 (gen_rtx_REG (SImode, i),
2168 gen_rtx_MEM (SImode,
2169 plus_constant (Pmode, stack_pointer_rtx,
2170 offs))));
2171 offs += 4;
2172 }
2173 }
2174
2175 /* Give back anything else. */
2176 /* XXX: Should accumulate total and then give it back. */
2177 while (growth >= 0)
2178 output_stack_adjust ( 1, fi.growth[growth--]);
2179 }
2180 \f
2181 /* This code is borrowed from the SH port. */
2182
2183 /* The MCORE cannot load a large constant into a register; constants have to
2184 come from a pc relative load. The reference of a pc relative load
2185 instruction must be less than 1k in front of the instruction. This
2186 means that we often have to dump a constant inside a function, and
2187 generate code to branch around it.
2188
2189 It is important to minimize this, since the branches will slow things
2190 down and make things bigger.
2191
2192 Worst case code looks like:
2193
2194 lrw L1,r0
2195 br L2
2196 align
2197 L1: .long value
2198 L2:
2199 ..
2200
2201 lrw L3,r0
2202 br L4
2203 align
2204 L3: .long value
2205 L4:
2206 ..
2207
2208 We fix this by performing a scan before scheduling, which notices which
2209 instructions need to have their operands fetched from the constant table
2210 and builds the table.
2211
2212 The algorithm is:
2213
2214 scan, find an instruction which needs a pcrel move. Look forward, find the
2215 last barrier which is within MAX_COUNT bytes of the requirement.
2216 If there isn't one, make one. Process all the instructions between
2217 the find and the barrier.
2218
2219 In the above example, we can tell that L3 is within 1k of L1, so
2220 the first move can be shrunk from the 2 insn+constant sequence into
2221 just 1 insn, and the constant moved to L3 to make:
2222
2223 lrw L1,r0
2224 ..
2225 lrw L3,r0
2226 br L4
2227 align
2228 L3:.long value
2229 L4:.long value
2230
2231 Then the second move becomes the target for the shortening process. */
2232
2233 typedef struct
2234 {
2235 rtx value; /* Value in table. */
2236 rtx label; /* Label of value. */
2237 } pool_node;
2238
2239 /* The maximum number of constants that can fit into one pool, since
2240 the pc relative range is 0...1020 bytes and constants are at least 4
2241 bytes long. We subtract 4 from the range to allow for the case where
2242 we need to add a branch/align before the constant pool. */
2243
2244 #define MAX_COUNT 1016
2245 #define MAX_POOL_SIZE (MAX_COUNT/4)
2246 static pool_node pool_vector[MAX_POOL_SIZE];
2247 static int pool_size;
2248
2249 /* Dump out any constants accumulated in the final pass. These
2250 will only be labels. */
2251
2252 const char *
2253 mcore_output_jump_label_table (void)
2254 {
2255 int i;
2256
2257 if (pool_size)
2258 {
2259 fprintf (asm_out_file, "\t.align 2\n");
2260
2261 for (i = 0; i < pool_size; i++)
2262 {
2263 pool_node * p = pool_vector + i;
2264
2265 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2266
2267 output_asm_insn (".long %0", &p->value);
2268 }
2269
2270 pool_size = 0;
2271 }
2272
2273 return "";
2274 }
2275
2276 /* Check whether insn is a candidate for a conditional. */
2277
2278 static cond_type
2279 is_cond_candidate (rtx insn)
2280 {
2281 /* The only things we conditionalize are those that can be directly
2282 changed into a conditional. Only bother with SImode items. If
2283 we wanted to be a little more aggressive, we could also do other
2284 modes such as DImode with reg-reg move or load 0. */
2285 if (NONJUMP_INSN_P (insn))
2286 {
2287 rtx pat = PATTERN (insn);
2288 rtx src, dst;
2289
2290 if (GET_CODE (pat) != SET)
2291 return COND_NO;
2292
2293 dst = XEXP (pat, 0);
2294
2295 if ((GET_CODE (dst) != REG &&
2296 GET_CODE (dst) != SUBREG) ||
2297 GET_MODE (dst) != SImode)
2298 return COND_NO;
2299
2300 src = XEXP (pat, 1);
2301
2302 if ((GET_CODE (src) == REG ||
2303 (GET_CODE (src) == SUBREG &&
2304 GET_CODE (SUBREG_REG (src)) == REG)) &&
2305 GET_MODE (src) == SImode)
2306 return COND_MOV_INSN;
2307 else if (GET_CODE (src) == CONST_INT &&
2308 INTVAL (src) == 0)
2309 return COND_CLR_INSN;
2310 else if (GET_CODE (src) == PLUS &&
2311 (GET_CODE (XEXP (src, 0)) == REG ||
2312 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2313 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2314 GET_MODE (XEXP (src, 0)) == SImode &&
2315 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2316 INTVAL (XEXP (src, 1)) == 1)
2317 return COND_INC_INSN;
2318 else if (((GET_CODE (src) == MINUS &&
2319 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2320 INTVAL (XEXP (src, 1)) == 1) ||
2321 (GET_CODE (src) == PLUS &&
2322 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2323 INTVAL (XEXP (src, 1)) == -1)) &&
2324 (GET_CODE (XEXP (src, 0)) == REG ||
2325 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2326 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2327 GET_MODE (XEXP (src, 0)) == SImode)
2328 return COND_DEC_INSN;
2329
2330 /* Some insns that we don't bother with:
2331 (set (rx:DI) (ry:DI))
2332 (set (rx:DI) (const_int 0))
2333 */
2334
2335 }
2336 else if (JUMP_P (insn)
2337 && GET_CODE (PATTERN (insn)) == SET
2338 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2339 return COND_BRANCH_INSN;
2340
2341 return COND_NO;
2342 }
2343
2344 /* Emit a conditional version of insn and replace the old insn with the
2345 new one. Return the new insn if emitted. */
2346
2347 static rtx_insn *
2348 emit_new_cond_insn (rtx_insn *insn, int cond)
2349 {
2350 rtx c_insn = 0;
2351 rtx pat, dst, src;
2352 cond_type num;
2353
2354 if ((num = is_cond_candidate (insn)) == COND_NO)
2355 return NULL;
2356
2357 pat = PATTERN (insn);
2358
2359 if (NONJUMP_INSN_P (insn))
2360 {
2361 dst = SET_DEST (pat);
2362 src = SET_SRC (pat);
2363 }
2364 else
2365 {
2366 dst = JUMP_LABEL (insn);
2367 src = NULL_RTX;
2368 }
2369
2370 switch (num)
2371 {
2372 case COND_MOV_INSN:
2373 case COND_CLR_INSN:
2374 if (cond)
2375 c_insn = gen_movt0 (dst, src, dst);
2376 else
2377 c_insn = gen_movt0 (dst, dst, src);
2378 break;
2379
2380 case COND_INC_INSN:
2381 if (cond)
2382 c_insn = gen_incscc (dst, dst);
2383 else
2384 c_insn = gen_incscc_false (dst, dst);
2385 break;
2386
2387 case COND_DEC_INSN:
2388 if (cond)
2389 c_insn = gen_decscc (dst, dst);
2390 else
2391 c_insn = gen_decscc_false (dst, dst);
2392 break;
2393
2394 case COND_BRANCH_INSN:
2395 if (cond)
2396 c_insn = gen_branch_true (dst);
2397 else
2398 c_insn = gen_branch_false (dst);
2399 break;
2400
2401 default:
2402 return NULL;
2403 }
2404
2405 /* Only copy the notes if they exist. */
2406 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2407 {
2408 /* We really don't need to bother with the notes and links at this
2409 point, but go ahead and save the notes. This will help is_dead()
2410 when applying peepholes (links don't matter since they are not
2411 used any more beyond this point for the mcore). */
2412 REG_NOTES (c_insn) = REG_NOTES (insn);
2413 }
2414
2415 if (num == COND_BRANCH_INSN)
2416 {
2417 /* For jumps, we need to be a little bit careful and emit the new jump
2418 before the old one and to update the use count for the target label.
2419 This way, the barrier following the old (uncond) jump will get
2420 deleted, but the label won't. */
2421 c_insn = emit_jump_insn_before (c_insn, insn);
2422
2423 ++ LABEL_NUSES (dst);
2424
2425 JUMP_LABEL (c_insn) = dst;
2426 }
2427 else
2428 c_insn = emit_insn_after (c_insn, insn);
2429
2430 delete_insn (insn);
2431
2432 return as_a <rtx_insn *> (c_insn);
2433 }
2434
2435 /* Attempt to change a basic block into a series of conditional insns. This
2436 works by taking the branch at the end of the 1st block and scanning for the
2437 end of the 2nd block. If all instructions in the 2nd block have cond.
2438 versions and the label at the start of block 3 is the same as the target
2439 from the branch at block 1, then conditionalize all insns in block 2 using
2440 the inverse condition of the branch at block 1. (Note I'm bending the
2441 definition of basic block here.)
2442
2443 e.g., change:
2444
2445 bt L2 <-- end of block 1 (delete)
2446 mov r7,r8
2447 addu r7,1
2448 br L3 <-- end of block 2
2449
2450 L2: ... <-- start of block 3 (NUSES==1)
2451 L3: ...
2452
2453 to:
2454
2455 movf r7,r8
2456 incf r7
2457 bf L3
2458
2459 L3: ...
2460
2461 we can delete the L2 label if NUSES==1 and re-apply the optimization
2462 starting at the last instruction of block 2. This may allow an entire
2463 if-then-else statement to be conditionalized. BRC */
2464 static rtx_insn *
2465 conditionalize_block (rtx_insn *first)
2466 {
2467 rtx_insn *insn;
2468 rtx br_pat;
2469 rtx_insn *end_blk_1_br = 0;
2470 rtx_insn *end_blk_2_insn = 0;
2471 rtx_insn *start_blk_3_lab = 0;
2472 int cond;
2473 int br_lab_num;
2474 int blk_size = 0;
2475
2476
2477 /* Check that the first insn is a candidate conditional jump. This is
2478 the one that we'll eliminate. If not, advance to the next insn to
2479 try. */
2480 if (! JUMP_P (first)
2481 || GET_CODE (PATTERN (first)) != SET
2482 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2483 return NEXT_INSN (first);
2484
2485 /* Extract some information we need. */
2486 end_blk_1_br = first;
2487 br_pat = PATTERN (end_blk_1_br);
2488
2489 /* Complement the condition since we use the reverse cond. for the insns. */
2490 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2491
2492 /* Determine what kind of branch we have. */
2493 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2494 {
2495 /* A normal branch, so extract label out of first arm. */
2496 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2497 }
2498 else
2499 {
2500 /* An inverse branch, so extract the label out of the 2nd arm
2501 and complement the condition. */
2502 cond = (cond == 0);
2503 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2504 }
2505
2506 /* Scan forward over block 2 for the label that starts block 3:
2507 that label must be the same as the branch target label from
2508 block 1. We don't care whether block 2 actually ends with a
2509 branch or a label (an uncond. branch is
2510 conditionalizable). */
2511 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2512 {
2513 enum rtx_code code;
2514
2515 code = GET_CODE (insn);
2516
2517 /* Look for the label at the start of block 3. */
2518 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2519 break;
2520
2521 /* Skip barriers, notes, and conditionalizable insns. If the
2522 insn is not conditionalizable or makes this optimization fail,
2523 just return the next insn so we can start over from that point. */
2524 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2525 return NEXT_INSN (insn);
2526
2527 /* Remember the last real insn before the label (i.e. end of block 2). */
2528 if (code == JUMP_INSN || code == INSN)
2529 {
2530 blk_size ++;
2531 end_blk_2_insn = insn;
2532 }
2533 }
2534
2535 if (!insn)
2536 return insn;
2537
2538 /* It is possible for this optimization to slow performance if the blocks
2539 are long. This really depends upon whether the branch is likely taken
2540 or not. If the branch is taken, we slow performance in many cases. But,
2541 if the branch is not taken, we always help performance (for a single
2542 block, but for a double block (i.e. when the optimization is re-applied)
2543 this is not true since the 'right thing' depends on the overall length of
2544 the collapsed block). As a compromise, don't apply this optimization on
2545 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2546 The best threshold depends on the latencies of the instructions (i.e.,
2547 the branch penalty). */
2548 if (optimize > 1 && blk_size > 2)
2549 return insn;
2550
2551 /* At this point, we've found the start of block 3 and we know that
2552 it is the destination of the branch from block 1. Also, all
2553 instructions in the block 2 are conditionalizable. So, apply the
2554 conditionalization and delete the branch. */
2555 start_blk_3_lab = insn;
2556
2557 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2558 insn = NEXT_INSN (insn))
2559 {
2560 rtx_insn *newinsn;
2561
2562 if (insn->deleted ())
2563 continue;
2564
2565 /* Try to form a conditional variant of the instruction and emit it. */
2566 if ((newinsn = emit_new_cond_insn (insn, cond)))
2567 {
2568 if (end_blk_2_insn == insn)
2569 end_blk_2_insn = newinsn;
2570
2571 insn = newinsn;
2572 }
2573 }
2574
2575 /* Note whether we will delete the label starting blk 3 when the jump
2576 gets deleted. If so, we want to re-apply this optimization at the
2577 last real instruction right before the label. */
2578 if (LABEL_NUSES (start_blk_3_lab) == 1)
2579 {
2580 start_blk_3_lab = 0;
2581 }
2582
2583 /* ??? We probably should redistribute the death notes for this insn, esp.
2584 the death of cc, but it doesn't really matter this late in the game.
2585 The peepholes all use is_dead() which will find the correct death
2586 regardless of whether there is a note. */
2587 delete_insn (end_blk_1_br);
2588
2589 if (! start_blk_3_lab)
2590 return end_blk_2_insn;
2591
2592 /* Return the insn right after the label at the start of block 3. */
2593 return NEXT_INSN (start_blk_3_lab);
2594 }
2595
2596 /* Apply the conditionalization of blocks optimization. This is the
2597 outer loop that traverses through the insns scanning for a branch
2598 that signifies an opportunity to apply the optimization. Note that
2599 this optimization is applied late. If we could apply it earlier,
2600 say before cse 2, it might expose more optimization opportunities,
2601 but the payback probably isn't really worth the effort (we'd have
2602 to update all reg/flow/notes/links/etc. to make it work - and stick it
2603 in before cse 2). */
2604
2605 static void
2606 conditionalize_optimization (void)
2607 {
2608 rtx_insn *insn;
2609
2610 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2611 continue;
2612 }
2613
2614 /* Machine dependent reorg pass: conditionalize blocks where we can; literal pool generation is left to the assembler. */
2615
2616 static void
2617 mcore_reorg (void)
2618 {
2619 /* Reset this variable. */
2620 current_function_anonymous_args = 0;
2621
2622 if (optimize == 0)
2623 return;
2624
2625 /* Conditionalize blocks where we can. */
2626 conditionalize_optimization ();
2627
2628 /* Literal pool generation is now pushed off until the assembler. */
2629 }
2630
2631 \f
2632 /* Return true if X is something that can be moved directly into r15. */
2633
2634 bool
2635 mcore_r15_operand_p (rtx x)
2636 {
2637 switch (GET_CODE (x))
2638 {
2639 case CONST_INT:
2640 return mcore_const_ok_for_inline (INTVAL (x));
2641
2642 case REG:
2643 case SUBREG:
2644 case MEM:
2645 return 1;
2646
2647 default:
2648 return 0;
2649 }
2650 }
2651
2652 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2653 directly move X into it, use r1-r14 as a temporary. */
2654
2655 enum reg_class
2656 mcore_secondary_reload_class (enum reg_class rclass,
2657 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2658 {
2659 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2660 && !mcore_r15_operand_p (x))
2661 return LRW_REGS;
2662 return NO_REGS;
2663 }
2664
2665 /* Return the reg_class to use when reloading the rtx X into the class
2666 RCLASS. If X is too complex to move directly into r15, prefer to
2667 use LRW_REGS instead. */
2668
2669 enum reg_class
2670 mcore_reload_class (rtx x, enum reg_class rclass)
2671 {
2672 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2673 return LRW_REGS;
2674
2675 return rclass;
2676 }
2677
2678 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2679 register. Note that the current version doesn't worry about whether
2680 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2681 in r2, which matches an SImode in r2). Might think in the future about
2682 whether we want to be able to say something about modes. */
2683
2684 int
2685 mcore_is_same_reg (rtx x, rtx y)
2686 {
2687 /* Strip any and all of the subreg wrappers. */
2688 while (GET_CODE (x) == SUBREG)
2689 x = SUBREG_REG (x);
2690
2691 while (GET_CODE (y) == SUBREG)
2692 y = SUBREG_REG (y);
2693
2694 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2695 return 1;
2696
2697 return 0;
2698 }
2699
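/* Override the options after they have been parsed: only the M340
supports little endian code, so selecting little endian implies -m340. */
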
2700 static void
2701 mcore_option_override (void)
2702 {
2703 /* Only the m340 supports little endian code. */
2704 if (TARGET_LITTLE_END && ! TARGET_M340)
2705 target_flags |= MASK_M340;
2706 }
2707
2708 \f
2709 /* Compute the number of word sized registers needed to
2710 hold a function argument of mode MODE and type TYPE. */
2711
2712 int
2713 mcore_num_arg_regs (machine_mode mode, const_tree type)
2714 {
2715 int size;
2716
2717 if (targetm.calls.must_pass_in_stack (mode, type))
2718 return 0;
2719
2720 if (type && mode == BLKmode)
2721 size = int_size_in_bytes (type);
2722 else
2723 size = GET_MODE_SIZE (mode);
2724
2725 return ROUND_ADVANCE (size);
2726 }
2727
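/* Return the rtx describing how an argument of mode MODE and type TYPE
is passed in registers starting at register REG. Usually this is a
single REG, but a BLKmode structure that only partly occupies its last
register is described by a PARALLEL (see below). */
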
2728 static rtx
2729 handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2730 {
2731 int size;
2732
2733 /* The MCore ABI defines that a structure whose size is not a whole multiple
2734 of words is passed packed into registers (or spilled onto the stack if
2735 not enough registers are available) with the last few bytes of the
2736 structure being packed, left-justified, into the last register/stack slot.
2737 GCC handles this correctly if the last word is in a stack slot, but we
2738 have to generate a special, PARALLEL RTX if the last word is in an
2739 argument register. */
2740 if (type
2741 && TYPE_MODE (type) == BLKmode
2742 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2743 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2744 && (size % UNITS_PER_WORD != 0)
2745 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2746 {
2747 rtx arg_regs [NPARM_REGS];
2748 int nregs;
2749 rtx result;
2750 rtvec rtvec;
2751
2752 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2753 {
2754 arg_regs [nregs] =
2755 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2756 GEN_INT (nregs * UNITS_PER_WORD));
2757 nregs ++;
2758 }
2759
2760 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2761 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2762 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2763 arg_regs[3], arg_regs[4], arg_regs[5]);
2764
2765 result = gen_rtx_PARALLEL (mode, rtvec);
2766 return result;
2767 }
2768
2769 return gen_rtx_REG (mode, reg);
2770 }
2771
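/* Return the rtx for the location (register or registers) in which
function FUNC returns a value of type VALTYPE. */
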
2772 rtx
2773 mcore_function_value (const_tree valtype, const_tree func)
2774 {
2775 machine_mode mode;
2776 int unsigned_p;
2777
2778 mode = TYPE_MODE (valtype);
2779
2780 /* Since we promote return types, we must promote the mode here too. */
2781 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2782
2783 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2784 }
2785
2786 /* Define where to put the arguments to a function.
2787 Value is zero to push the argument on the stack,
2788 or a hard register in which to store the argument.
2789
2790 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2791 the preceding args and about the function being called.
2792 ARG is a description of the argument.
2793
2794 On MCore the first args are normally in registers
2795 and the rest are pushed. Any arg that starts within the first
2796 NPARM_REGS words is at least partially passed in a register unless
2797 its data type forbids. */
2798
2799 static rtx
2800 mcore_function_arg (cumulative_args_t cum, const function_arg_info &arg)
2801 {
2802 int arg_reg;
2803
2804 if (!arg.named || arg.end_marker_p ())
2805 return 0;
2806
2807 if (targetm.calls.must_pass_in_stack (arg.mode, arg.type))
2808 return 0;
2809
2810 arg_reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
2811
2812 if (arg_reg < NPARM_REGS)
2813 return handle_structs_in_regs (arg.mode, arg.type,
2814 FIRST_PARM_REG + arg_reg);
2815
2816 return 0;
2817 }
2818
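/* Update the argument summarizer CUM_V to skip past an argument of mode
MODE and type TYPE. Only named arguments consume argument registers. */
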
2819 static void
2820 mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2821 const_tree type, bool named ATTRIBUTE_UNUSED)
2822 {
2823 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2824
2825 *cum = (ROUND_REG (*cum, mode)
2826 + (int)named * mcore_num_arg_regs (mode, type));
2827 }
2828
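/* Return the boundary, in bits, on which an argument of mode MODE and
type TYPE must be aligned when passed. */
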
2829 static unsigned int
2830 mcore_function_arg_boundary (machine_mode mode,
2831 const_tree type ATTRIBUTE_UNUSED)
2832 {
2833 /* Doubles must be aligned to an 8 byte boundary. */
2834 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2835 ? BIGGEST_ALIGNMENT
2836 : PARM_BOUNDARY);
2837 }
2838
2839 /* Returns the number of bytes of argument registers required to hold *part*
2840 of argument ARG. If the argument fits entirely in the argument registers,
2841 or entirely on the stack, then 0 is returned. CUM is the number of
2842 argument registers already used by earlier parameters to the function. */
2843
2844 static int
2845 mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
2846 {
2847 int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
2848
2849 if (!arg.named)
2850 return 0;
2851
2852 if (targetm.calls.must_pass_in_stack (arg.mode, arg.type))
2853 return 0;
2854
2855 /* REG is not the *hardware* register number of the register that holds
2856 the argument, it is the *argument* register number. So for example,
2857 the first argument to a function goes in argument register 0, which
2858 translates (for the MCore) into hardware register 2. The second
2859 argument goes into argument register 1, which translates into hardware
2860 register 3, and so on. NPARM_REGS is the number of argument registers
2861 supported by the target, not the maximum hardware register number of
2862 the target. */
2863 if (reg >= NPARM_REGS)
2864 return 0;
2865
2866 /* If the argument fits entirely in registers, return 0. */
2867 if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
2868 return 0;
2869
2870 /* The argument overflows the number of available argument registers.
2871 Compute how many argument registers have not yet been assigned to
2872 hold an argument. */
2873 reg = NPARM_REGS - reg;
2874
2875 /* Return partially in registers and partially on the stack. */
2876 return reg * UNITS_PER_WORD;
2877 }
2878 \f
2879 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2880
2881 int
2882 mcore_dllexport_name_p (const char * symbol)
2883 {
2884 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2885 }
2886
2887 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2888
2889 int
2890 mcore_dllimport_name_p (const char * symbol)
2891 {
2892 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2893 }
2894
2895 /* Mark a DECL as being dllexport'd. */
2896
2897 static void
2898 mcore_mark_dllexport (tree decl)
2899 {
2900 const char * oldname;
2901 char * newname;
2902 rtx rtlname;
2903 tree idp;
2904
2905 rtlname = XEXP (DECL_RTL (decl), 0);
2906
2907 if (GET_CODE (rtlname) == MEM)
2908 rtlname = XEXP (rtlname, 0);
2909 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2910 oldname = XSTR (rtlname, 0);
2911
2912 if (mcore_dllexport_name_p (oldname))
2913 return; /* Already done. */
2914
2915 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2916 sprintf (newname, "@e.%s", oldname);
2917
2918 /* We pass newname through get_identifier to ensure it has a unique
2919 address. RTL processing can sometimes peek inside the symbol ref
2920 and compare the string's addresses to see if two symbols are
2921 identical. */
2922 /* ??? At least I think that's why we do this. */
2923 idp = get_identifier (newname);
2924
2925 XEXP (DECL_RTL (decl), 0) =
2926 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2927 }
2928
2929 /* Mark a DECL as being dllimport'd. */
2930
2931 static void
2932 mcore_mark_dllimport (tree decl)
2933 {
2934 const char * oldname;
2935 char * newname;
2936 tree idp;
2937 rtx rtlname;
2938 rtx newrtl;
2939
2940 rtlname = XEXP (DECL_RTL (decl), 0);
2941
2942 if (GET_CODE (rtlname) == MEM)
2943 rtlname = XEXP (rtlname, 0);
2944 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2945 oldname = XSTR (rtlname, 0);
2946
2947 gcc_assert (!mcore_dllexport_name_p (oldname));
2948 if (mcore_dllimport_name_p (oldname))
2949 return; /* Already done. */
2950
2951 /* ??? One can well ask why we're making these checks here,
2952 and that would be a good question. */
2953
2954 /* Imported variables can't be initialized. */
2955 if (TREE_CODE (decl) == VAR_DECL
2956 && !DECL_VIRTUAL_P (decl)
2957 && DECL_INITIAL (decl))
2958 {
2959 error ("initialized variable %q+D is marked dllimport", decl);
2960 return;
2961 }
2962
2963 /* `extern' needn't be specified with dllimport.
2964 Specify `extern' now and hope for the best. Sigh. */
2965 if (TREE_CODE (decl) == VAR_DECL
2966 /* ??? Is this test for vtables needed? */
2967 && !DECL_VIRTUAL_P (decl))
2968 {
2969 DECL_EXTERNAL (decl) = 1;
2970 TREE_PUBLIC (decl) = 1;
2971 }
2972
2973 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2974 sprintf (newname, "@i.__imp_%s", oldname);
2975
2976 /* We pass newname through get_identifier to ensure it has a unique
2977 address. RTL processing can sometimes peek inside the symbol ref
2978 and compare the string's addresses to see if two symbols are
2979 identical. */
2980 /* ??? At least I think that's why we do this. */
2981 idp = get_identifier (newname);
2982
2983 newrtl = gen_rtx_MEM (Pmode,
2984 gen_rtx_SYMBOL_REF (Pmode,
2985 IDENTIFIER_POINTER (idp)));
2986 XEXP (DECL_RTL (decl), 0) = newrtl;
2987 }
2988
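/* Return nonzero if DECL is a function or variable with the "dllexport"
attribute. */
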
2989 static int
2990 mcore_dllexport_p (tree decl)
2991 {
2992 if ( TREE_CODE (decl) != VAR_DECL
2993 && TREE_CODE (decl) != FUNCTION_DECL)
2994 return 0;
2995
2996 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2997 }
2998
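/* Return nonzero if DECL is a function or variable with the "dllimport"
attribute. */
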
2999 static int
3000 mcore_dllimport_p (tree decl)
3001 {
3002 if ( TREE_CODE (decl) != VAR_DECL
3003 && TREE_CODE (decl) != FUNCTION_DECL)
3004 return 0;
3005
3006 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3007 }
3008
3009 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3010 install some info in the .drectve (PE) or .exports (ELF) sections. */
3011
3012 static void
3013 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3014 {
3015 /* Mark the decl so we can tell from the rtl whether the object is
3016 dllexport'd or dllimport'd. */
3017 if (mcore_dllexport_p (decl))
3018 mcore_mark_dllexport (decl);
3019 else if (mcore_dllimport_p (decl))
3020 mcore_mark_dllimport (decl);
3021
3022 /* It might be that DECL has already been marked as dllimport, but
3023 a subsequent definition nullified that. The attribute is gone
3024 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3025 else if ((TREE_CODE (decl) == FUNCTION_DECL
3026 || TREE_CODE (decl) == VAR_DECL)
3027 && DECL_RTL (decl) != NULL_RTX
3028 && GET_CODE (DECL_RTL (decl)) == MEM
3029 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3030 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3031 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3032 {
3033 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3034 tree idp = get_identifier (oldname + 9);
3035 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3036
3037 XEXP (DECL_RTL (decl), 0) = newrtl;
3038
3039 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3040 ??? We leave these alone for now. */
3041 }
3042 }
3043
3044 /* Undo the effects of the above. */
3045
3046 static const char *
3047 mcore_strip_name_encoding (const char * str)
3048 {
3049 return str + (str[0] == '@' ? 3 : 0);
3050 }
3051
3052 /* MCore specific attribute support.
3053 dllexport - for exporting a function/variable that will live in a dll
3054 dllimport - for importing a function/variable from a dll
3055 naked - do not create a function prologue/epilogue. */
3056
3057 /* Handle a "naked" attribute; arguments as in
3058 struct attribute_spec.handler. */
3059
3060 static tree
3061 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3062 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3063 {
3064 if (TREE_CODE (*node) != FUNCTION_DECL)
3065 {
3066 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3067 name);
3068 *no_add_attrs = true;
3069 }
3070
3071 return NULL_TREE;
3072 }
3073
3074 /* ??? It looks like this is PE specific? Oh well, this is what the
3075 old code did as well. */
3076
3077 static void
3078 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3079 {
3080 int len;
3081 const char * name;
3082 char * string;
3083 const char * prefix;
3084
3085 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3086
3087 /* Strip off any encoding in name. */
3088 name = (* targetm.strip_name_encoding) (name);
3089
3090 /* The object is put in, for example, section .text$foo.
3091 The linker will then ultimately place them in .text
3092 (everything from the $ on is stripped). */
3093 if (TREE_CODE (decl) == FUNCTION_DECL)
3094 prefix = ".text$";
3095 /* For compatibility with EPOC, we ignore the fact that the
3096 section might have relocs against it. */
3097 else if (decl_readonly_section (decl, 0))
3098 prefix = ".rdata$";
3099 else
3100 prefix = ".data$";
3101
3102 len = strlen (name) + strlen (prefix);
3103 string = XALLOCAVEC (char, len + 1);
3104
3105 sprintf (string, "%s%s", prefix, name);
3106
3107 set_decl_section_name (decl, string);
3108 }
3109
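/* Return nonzero if the current function has the "naked" attribute,
i.e. no prologue or epilogue should be generated for it. */
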
3110 int
3111 mcore_naked_function_p (void)
3112 {
3113 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3114 }
3115
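/* Worker function for TARGET_WARN_FUNC_RETURN. */
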
3116 static bool
3117 mcore_warn_func_return (tree decl)
3118 {
3119 /* Naked functions are implemented entirely in assembly, including the
3120 return sequence, so suppress warnings about this. */
3121 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3122 }
3123
3124 #ifdef OBJECT_FORMAT_ELF
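/* Worker function for TARGET_ASM_NAMED_SECTION. */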
3125 static void
3126 mcore_asm_named_section (const char *name,
3127 unsigned int flags ATTRIBUTE_UNUSED,
3128 tree decl ATTRIBUTE_UNUSED)
3129 {
3130 fprintf (asm_out_file, "\t.section %s\n", name);
3131 }
3132 #endif /* OBJECT_FORMAT_ELF */
3133
3134 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3135
3136 static void
3137 mcore_external_libcall (rtx fun)
3138 {
3139 fprintf (asm_out_file, "\t.import\t");
3140 assemble_name (asm_out_file, XSTR (fun, 0));
3141 fprintf (asm_out_file, "\n");
3142 }
3143
3144 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3145
3146 static bool
3147 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3148 {
3149 const HOST_WIDE_INT size = int_size_in_bytes (type);
3150 return (size == -1 || size > 2 * UNITS_PER_WORD);
3151 }
3152
3153 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3154 Output assembler code for a block containing the constant parts
3155 of a trampoline, leaving space for the variable parts.
3156
3157 On the MCore, the trampoline looks like:
3158 lrw r1, function
3159 lrw r13, area
3160 jmp r13
3161 or r0, r0
3162 .literals */
3163
3164 static void
3165 mcore_asm_trampoline_template (FILE *f)
3166 {
3167 fprintf (f, "\t.short 0x7102\n");
3168 fprintf (f, "\t.short 0x7d02\n");
3169 fprintf (f, "\t.short 0x00cd\n");
3170 fprintf (f, "\t.short 0x1e00\n");
3171 fprintf (f, "\t.long 0\n");
3172 fprintf (f, "\t.long 0\n");
3173 }
3174
3175 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3176
3177 static void
3178 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3179 {
3180 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3181 rtx mem;
3182
3183 emit_block_move (m_tramp, assemble_trampoline_template (),
3184 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3185
3186 mem = adjust_address (m_tramp, SImode, 8);
3187 emit_move_insn (mem, chain_value);
3188 mem = adjust_address (m_tramp, SImode, 12);
3189 emit_move_insn (mem, fnaddr);
3190 }
3191
3192 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3193
3194 On the MCore, allow anything but a double. */
3195
3196 static bool
3197 mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3198 {
3199 return GET_CODE (x) != CONST_DOUBLE;
3200 }
3201
3202 /* Helper function for `mcore_legitimate_address_p'. */
3203
3204 static bool
3205 mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3206 {
3207 if (strict_p)
3208 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3209 else
3210 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3211 }
3212
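/* Return true if X is a register rtx that may be used as a base
register, applying the strict test when STRICT_P. */
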
3213 static bool
3214 mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3215 {
3216 return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
3217 }
3218
3219 /* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3220 and for DI is 0..56 because we use two SI loads, etc. */
3221
3222 static bool
3223 mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3224 {
3225 if (CONST_INT_P (op))
3226 {
3227 if (GET_MODE_SIZE (mode) >= 4
3228 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3229 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3230 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3231 return true;
3232 if (GET_MODE_SIZE (mode) == 2
3233 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3234 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3235 return true;
3236 if (GET_MODE_SIZE (mode) == 1
3237 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3238 return true;
3239 }
3240 return false;
3241 }
3242
3243
3244 /* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3245
3246 Allow REG
3247 REG + disp */
3248
3249 static bool
3250 mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3251 addr_space_t as)
3252 {
3253 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3254
3255 if (mcore_base_register_rtx_p (x, strict_p))
3256 return true;
3257 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3258 {
3259 rtx xop0 = XEXP (x, 0);
3260 rtx xop1 = XEXP (x, 1);
3261 if (mcore_base_register_rtx_p (xop0, strict_p)
3262 && mcore_legitimate_index_p (mode, xop1))
3263 return true;
3264 if (mcore_base_register_rtx_p (xop1, strict_p)
3265 && mcore_legitimate_index_p (mode, xop0))
3266 return true;
3267 }
3268
3269 return false;
3270 }
3271
3272 /* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
3273 even registers. */
3274
3275 static bool
3276 mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3277 {
3278 if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
3279 return (regno & 1) == 0;
3280
3281 return regno < 18;
3282 }
3283
3284 /* Implement TARGET_MODES_TIEABLE_P. */
3285
3286 static bool
3287 mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3288 {
3289 return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);
3290 }