]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/lm32/lm32.c
common.opt (main_input_filename, [...]): New Variable entries.
[thirdparty/gcc.git] / gcc / config / lm32 / lm32.c
1 /* Subroutines used for code generation on the Lattice Mico32 architecture.
2 Contributed by Jon Beniston <jon@beniston.com>
3
4 Copyright (C) 2009, 2010 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
33 #include "insn-attr.h"
34 #include "insn-codes.h"
35 #include "recog.h"
36 #include "output.h"
37 #include "tree.h"
38 #include "expr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "tm_p.h"
42 #include "function.h"
43 #include "diagnostic-core.h"
44 #include "optabs.h"
45 #include "libfuncs.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "langhooks.h"
50 #include "tm-constrs.h"
51 #include "df.h"
52
/* Per-function stack frame layout, computed by lm32_compute_frame_size
   and cached in current_frame_info.  All sizes are in bytes.  */
struct lm32_frame_info
{
  HOST_WIDE_INT total_size;	/* number of bytes of entire frame.  */
  HOST_WIDE_INT callee_size;	/* number of bytes to save callee saves.  */
  HOST_WIDE_INT pretend_size;	/* number of bytes we pretend caller did.  */
  HOST_WIDE_INT args_size;	/* number of bytes for outgoing arguments.  */
  HOST_WIDE_INT locals_size;	/* number of bytes for local variables.  */
  unsigned int reg_save_mask;	/* mask of saved registers.  */
};
62
/* Prototypes for static functions.  The bodies appear below in the same
   order; most implement the target hooks installed further down.  */
static rtx emit_add (rtx dest, rtx src0, rtx src1);
static void expand_save_restore (struct lm32_frame_info *info, int op);
static void stack_adjust (HOST_WIDE_INT amount);
static bool lm32_in_small_data_p (const_tree);
static void lm32_setup_incoming_varargs (CUMULATIVE_ARGS * cum,
					 enum machine_mode mode, tree type,
					 int *pretend_size, int no_rtl);
static bool lm32_rtx_costs (rtx x, int code, int outer_code, int *total,
			    bool speed);
static bool lm32_can_eliminate (const int, const int);
static bool
lm32_legitimate_address_p (enum machine_mode mode, rtx x, bool strict);
static HOST_WIDE_INT lm32_compute_frame_size (int size);
static void lm32_option_override (void);
static rtx lm32_function_arg (CUMULATIVE_ARGS * cum,
			      enum machine_mode mode, const_tree type,
			      bool named);
static void lm32_function_arg_advance (CUMULATIVE_ARGS * cum,
				       enum machine_mode mode,
				       const_tree type, bool named);
/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  Omit the frame pointer at
   -O1 and above; the table is terminated by an OPT_LEVELS_NONE entry.  */
static const struct default_options lm32_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

/* Install the lm32 implementations of the target hooks.  Each #undef
   clears the default from target-def.h before redefining it.  */
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE lm32_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE lm32_option_optimization_table
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS lm32_rtx_costs
#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P lm32_in_small_data_p
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS lm32_setup_incoming_varargs
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG lm32_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE lm32_function_arg_advance
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x8000
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE lm32_can_eliminate
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P lm32_legitimate_address_p

/* The target hook vector used by the rest of the compiler.  */
struct gcc_target targetm = TARGET_INITIALIZER;

/* Current frame information calculated by lm32_compute_frame_size.  */
static struct lm32_frame_info current_frame_info;
125
126 /* Return non-zero if the given return type should be returned in memory. */
127
128 int
129 lm32_return_in_memory (tree type)
130 {
131 HOST_WIDE_INT size;
132
133 if (!AGGREGATE_TYPE_P (type))
134 {
135 /* All simple types are returned in registers. */
136 return 0;
137 }
138
139 size = int_size_in_bytes (type);
140 if (size >= 0 && size <= UNITS_PER_WORD)
141 {
142 /* If it can fit in one register. */
143 return 0;
144 }
145
146 return 1;
147 }
148
/* Generate and emit a word sized add instruction, DEST = SRC0 + SRC1.
   Returns the emitted insn so callers can mark it frame related.  */

static rtx
emit_add (rtx dest, rtx src0, rtx src1)
{
  rtx insn;
  insn = emit_insn (gen_addsi3 (dest, src0, src1));
  return insn;
}
158
/* Generate the code to compare (and possibly branch) two integer values
   TEST_CODE is the comparison code we are trying to emulate 
   (or implement directly)
   RESULT is where to store the result of the comparison, 
   or null to emit a branch
   CMP0 CMP1 are the two comparison operands
   DESTINATION is the destination of the branch, or null to only compare
   */

static void
gen_int_relational (enum rtx_code code,	/* relational test (EQ, etc).  */
		    rtx result,		/* result to store comp. or 0.  */
		    rtx cmp0,		/* first operand to compare.  */
		    rtx cmp1,		/* second operand to compare.  */
		    rtx destination)	/* destination of the branch, or 0.  */
{
  enum machine_mode mode;
  int branch_p;

  /* Take the mode from whichever operand has one (one may be a
     mode-less constant).  */
  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Is this a branch or compare.  */
  branch_p = (destination != 0);

  /* Instruction set doesn't support LE or LT, so swap operands and use 
     GE, GT.  */
  switch (code)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
      {
	rtx temp;

	code = swap_condition (code);
	temp = cmp0;
	cmp0 = cmp1;
	cmp1 = temp;
	break;
      }
    default:
      break;
    }

  if (branch_p)
    {
      rtx insn, cond, label;

      /* Operands must be in registers.  */
      if (!register_operand (cmp0, mode))
	cmp0 = force_reg (mode, cmp0);
      if (!register_operand (cmp1, mode))
	cmp1 = force_reg (mode, cmp1);

      /* Generate conditional branch instruction:
	 (set pc (if_then_else (code cmp0 cmp1) label pc)).  */
      cond = gen_rtx_fmt_ee (code, mode, cmp0, cmp1);
      label = gen_rtx_LABEL_REF (VOIDmode, destination);
      insn = gen_rtx_SET (VOIDmode, pc_rtx,
			  gen_rtx_IF_THEN_ELSE (VOIDmode,
						cond, label, pc_rtx));
      emit_jump_insn (insn);
    }
  else
    {
      /* We can't have const_ints in cmp0, other than 0.  */
      if ((GET_CODE (cmp0) == CONST_INT) && (INTVAL (cmp0) != 0))
	cmp0 = force_reg (mode, cmp0);

      /* If the comparison is against an int not in legal range
	 move it into a register.  Signed comparisons take a signed
	 16-bit immediate (constraint K), unsigned ones an unsigned
	 16-bit immediate (constraint L).  */
      if (GET_CODE (cmp1) == CONST_INT)
	{
	  switch (code)
	    {
	    case EQ:
	    case NE:
	    case LE:
	    case LT:
	    case GE:
	    case GT:
	      if (!satisfies_constraint_K (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    case LEU:
	    case LTU:
	    case GEU:
	    case GTU:
	      if (!satisfies_constraint_L (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* Generate compare instruction.  */
      emit_move_insn (result, gen_rtx_fmt_ee (code, mode, cmp0, cmp1));
    }
}
261
262 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
263 and OPERAND[3]. Store the result in OPERANDS[0]. */
264
265 void
266 lm32_expand_scc (rtx operands[])
267 {
268 rtx target = operands[0];
269 enum rtx_code code = GET_CODE (operands[1]);
270 rtx op0 = operands[2];
271 rtx op1 = operands[3];
272
273 gen_int_relational (code, target, op0, op1, NULL_RTX);
274 }
275
276 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
277 CODE and jump to OPERANDS[3] if the condition holds. */
278
279 void
280 lm32_expand_conditional_branch (rtx operands[])
281 {
282 enum rtx_code code = GET_CODE (operands[0]);
283 rtx op0 = operands[1];
284 rtx op1 = operands[2];
285 rtx destination = operands[3];
286
287 gen_int_relational (code, NULL_RTX, op0, op1, destination);
288 }
289
/* Generate and emit RTL to save or restore callee save registers.
   INFO describes the current frame; OP is 0 to save registers to the
   stack (prologue) and non-zero to restore them (epilogue).  */
static void
expand_save_restore (struct lm32_frame_info *info, int op)
{
  unsigned int reg_save_mask = info->reg_save_mask;
  int regno;
  HOST_WIDE_INT offset;
  rtx insn;

  /* Callee saves are below locals and above outgoing arguments.  */
  offset = info->args_size + info->callee_size;
  for (regno = 0; regno <= 31; regno++)
    {
      if ((reg_save_mask & (1 << regno)) != 0)
	{
	  rtx offset_rtx;
	  rtx mem;
	  
	  offset_rtx = GEN_INT (offset);
	  if (satisfies_constraint_K (offset_rtx))
	    {	      
	      /* Offset fits the signed 16-bit displacement field, so
		 address the slot directly off the stack pointer.  */
	      mem = gen_rtx_MEM (word_mode,
				 gen_rtx_PLUS (Pmode,
					       stack_pointer_rtx,
					       offset_rtx));
	    }
	  else
	    {	      
	      /* r10 is caller saved so it can be used as a temp reg.  */
	      rtx r10;	      
	      
	      r10 = gen_rtx_REG (word_mode, 10);
	      insn = emit_move_insn (r10, offset_rtx);
	      if (op == 0)
		RTX_FRAME_RELATED_P (insn) = 1;
	      insn = emit_add (r10, r10, stack_pointer_rtx);
	      if (op == 0)
		RTX_FRAME_RELATED_P (insn) = 1;		
	      mem = gen_rtx_MEM (word_mode, r10);
	    }	      
	    	    
	  if (op == 0)
	    insn = emit_move_insn (mem, gen_rtx_REG (word_mode, regno));
	  else
	    insn = emit_move_insn (gen_rtx_REG (word_mode, regno), mem);
        
	  /* only prologue instructions which set the sp fp or save a 
	     register should be marked as frame related.  */
	  if (op == 0)
	    RTX_FRAME_RELATED_P (insn) = 1;
	  offset -= UNITS_PER_WORD;
	}
    }
}
344
345 static void
346 stack_adjust (HOST_WIDE_INT amount)
347 {
348 rtx insn;
349
350 if (!IN_RANGE (amount, -32776, 32768))
351 {
352 /* r10 is caller saved so it can be used as a temp reg. */
353 rtx r10;
354 r10 = gen_rtx_REG (word_mode, 10);
355 insn = emit_move_insn (r10, GEN_INT (amount));
356 if (amount < 0)
357 RTX_FRAME_RELATED_P (insn) = 1;
358 insn = emit_add (stack_pointer_rtx, stack_pointer_rtx, r10);
359 if (amount < 0)
360 RTX_FRAME_RELATED_P (insn) = 1;
361 }
362 else
363 {
364 insn = emit_add (stack_pointer_rtx,
365 stack_pointer_rtx, GEN_INT (amount));
366 if (amount < 0)
367 RTX_FRAME_RELATED_P (insn) = 1;
368 }
369 }
370
371
/* Create and emit instructions for a function's prologue: allocate the
   frame, save callee-saved registers, and set up the frame pointer if
   one is needed.  Does nothing for a zero-size frame.  */
void
lm32_expand_prologue (void)
{
  rtx insn;

  lm32_compute_frame_size (get_frame_size ());

  if (current_frame_info.total_size > 0)
    {
      /* Add space on stack new frame.  */
      stack_adjust (-current_frame_info.total_size);

      /* Save callee save registers.  */
      if (current_frame_info.reg_save_mask != 0)
	expand_save_restore (&current_frame_info, 0);

      /* Setup frame pointer if it's needed.  */
      if (frame_pointer_needed == 1)
	{
	  /* Load offset - Don't use total_size, as that includes pretend_size, 
             which isn't part of this frame?  */
	  insn =
	    emit_move_insn (frame_pointer_rtx,
			    GEN_INT (current_frame_info.args_size +
				     current_frame_info.callee_size +
				     current_frame_info.locals_size));
	  RTX_FRAME_RELATED_P (insn) = 1;

	  /* Add in sp.  */
	  insn = emit_add (frame_pointer_rtx,
			   frame_pointer_rtx, stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Prevent prologue from being scheduled into function body.  */
      emit_insn (gen_blockage ());
    }
}
411
412 /* Create an emit instructions for a functions epilogue. */
413 void
414 lm32_expand_epilogue (void)
415 {
416 rtx ra_rtx = gen_rtx_REG (Pmode, RA_REGNUM);
417
418 lm32_compute_frame_size (get_frame_size ());
419
420 if (current_frame_info.total_size > 0)
421 {
422 /* Prevent stack code from being reordered. */
423 emit_insn (gen_blockage ());
424
425 /* Restore callee save registers. */
426 if (current_frame_info.reg_save_mask != 0)
427 expand_save_restore (&current_frame_info, 1);
428
429 /* Deallocate stack. */
430 stack_adjust (current_frame_info.total_size);
431
432 /* Return to calling function. */
433 emit_jump_insn (gen_return_internal (ra_rtx));
434 }
435 else
436 {
437 /* Return to calling function. */
438 emit_jump_insn (gen_return_internal (ra_rtx));
439 }
440 }
441
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.  SIZE is the size of local variables in bytes.  Also
   records the full frame layout in current_frame_info as a side effect.  */
static HOST_WIDE_INT
lm32_compute_frame_size (int size)
{
  int regno;
  HOST_WIDE_INT total_size, locals_size, args_size, pretend_size, callee_size;
  unsigned int reg_save_mask;

  locals_size = size;
  args_size = crtl->outgoing_args_size;
  pretend_size = crtl->args.pretend_args_size;
  callee_size = 0;
  reg_save_mask = 0;

  /* Build mask that actually determines which registers we save
     and calculate size required to store them in the stack.  */
  for (regno = 1; regno < SP_REGNUM; regno++)
    {
      if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
	{
	  reg_save_mask |= 1 << regno;
	  callee_size += UNITS_PER_WORD;
	}
    }
  /* ra must be saved for non-leaf functions and at -O0 (so the debugger
     can always find the return address).  */
  if (df_regs_ever_live_p (RA_REGNUM) || !current_function_is_leaf
      || !optimize)
    {
      reg_save_mask |= 1 << RA_REGNUM;
      callee_size += UNITS_PER_WORD;
    }
  if (!(reg_save_mask & (1 << FP_REGNUM)) && frame_pointer_needed)
    {
      reg_save_mask |= 1 << FP_REGNUM;
      callee_size += UNITS_PER_WORD;
    }

  /* Compute total frame size.  */
  total_size = pretend_size + args_size + locals_size + callee_size;

  /* Align frame to appropriate boundary (4 bytes).  */
  total_size = (total_size + 3) & ~3;

  /* Save computed information.  */
  current_frame_info.total_size = total_size;
  current_frame_info.callee_size = callee_size;
  current_frame_info.pretend_size = pretend_size;
  current_frame_info.locals_size = locals_size;
  current_frame_info.args_size = args_size;
  current_frame_info.reg_save_mask = reg_save_mask;

  return total_size;
}
495
/* Print operand OP to FILE for an assembler instruction.  LETTER is the
   operand-modifier letter from the template ('z' prints the zero
   register r0 for a const 0).  Comparison codes print the branch
   condition suffix used by the lm32 b<cond> instructions.  */
void
lm32_print_operand (FILE * file, rtx op, int letter)
{
  enum rtx_code code;

  code = GET_CODE (op);

  if (code == SIGN_EXTEND)
    op = XEXP (op, 0), code = GET_CODE (op);
  else if (code == REG || code == SUBREG)
    {
      int regnum;

      if (code == REG)
	regnum = REGNO (op);
      else
	regnum = true_regnum (op);

      fprintf (file, "%s", reg_names[regnum]);
    }
  else if (code == HIGH)
    output_addr_const (file, XEXP (op, 0));  
  else if (code == MEM)
    output_address (XEXP (op, 0));
  else if (letter == 'z' && GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    fprintf (file, "%s", reg_names[0]);
  else if (GET_CODE (op) == CONST_DOUBLE)
    {
      if ((CONST_DOUBLE_LOW (op) != 0) || (CONST_DOUBLE_HIGH (op) != 0))
	output_operand_lossage ("only 0.0 can be loaded as an immediate");
      else
	fprintf (file, "0");
    }
  else if (code == EQ)
    fprintf (file, "e  ");
  else if (code == NE)
    fprintf (file, "ne ");
  else if (code == GT)
    fprintf (file, "g  ");
  else if (code == GTU)
    fprintf (file, "gu ");
  else if (code == LT)
    fprintf (file, "l  ");
  else if (code == LTU)
    fprintf (file, "lu ");
  else if (code == GE)
    fprintf (file, "ge ");
  else if (code == GEU)
    /* NOTE(review): "geu" and "leu" lack the trailing space the other
       condition suffixes have — verify against lm32.md templates whether
       this asymmetry is intentional before changing it.  */
    fprintf (file, "geu");
  else if (code == LE)
    fprintf (file, "le ");
  else if (code == LEU)
    fprintf (file, "leu");
  else
    output_addr_const (file, op);
}
552
553 /* A C compound statement to output to stdio stream STREAM the
554 assembler syntax for an instruction operand that is a memory
555 reference whose address is ADDR. ADDR is an RTL expression.
556
557 On some machines, the syntax for a symbolic address depends on
558 the section that the address refers to. On these machines,
559 define the macro `ENCODE_SECTION_INFO' to store the information
560 into the `symbol_ref', and then check for it here. */
561
562 void
563 lm32_print_operand_address (FILE * file, rtx addr)
564 {
565 switch (GET_CODE (addr))
566 {
567 case REG:
568 fprintf (file, "(%s+0)", reg_names[REGNO (addr)]);
569 break;
570
571 case MEM:
572 output_address (XEXP (addr, 0));
573 break;
574
575 case PLUS:
576 {
577 rtx arg0 = XEXP (addr, 0);
578 rtx arg1 = XEXP (addr, 1);
579
580 if (GET_CODE (arg0) == REG && CONSTANT_P (arg1))
581 {
582 if (GET_CODE (arg1) == CONST_INT)
583 fprintf (file, "(%s+%ld)", reg_names[REGNO (arg0)],
584 INTVAL (arg1));
585 else
586 {
587 fprintf (file, "(%s+", reg_names[REGNO (arg0)]);
588 output_addr_const (file, arg1);
589 fprintf (file, ")");
590 }
591 }
592 else if (CONSTANT_P (arg0) && CONSTANT_P (arg1))
593 output_addr_const (file, addr);
594 else
595 fatal_insn ("bad operand", addr);
596 }
597 break;
598
599 case SYMBOL_REF:
600 if (SYMBOL_REF_SMALL_P (addr))
601 {
602 fprintf (file, "gp(");
603 output_addr_const (file, addr);
604 fprintf (file, ")");
605 }
606 else
607 fatal_insn ("can't use non gp relative absolute address", addr);
608 break;
609
610 default:
611 fatal_insn ("invalid addressing mode", addr);
612 break;
613 }
614 }
615
616 /* Determine where to put an argument to a function.
617 Value is zero to push the argument on the stack,
618 or a hard register in which to store the argument.
619
620 MODE is the argument's machine mode.
621 TYPE is the data type of the argument (as a tree).
622 This is null for libcalls where that information may
623 not be available.
624 CUM is a variable of type CUMULATIVE_ARGS which gives info about
625 the preceding args and about the function being called.
626 NAMED is nonzero if this argument is a named parameter
627 (otherwise it is an extra parameter matching an ellipsis). */
628
629 static rtx
630 lm32_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
631 const_tree type, bool named)
632 {
633 if (mode == VOIDmode)
634 /* Compute operand 2 of the call insn. */
635 return GEN_INT (0);
636
637 if (targetm.calls.must_pass_in_stack (mode, type))
638 return NULL_RTX;
639
640 if (!named || (*cum + LM32_NUM_REGS2 (mode, type) > LM32_NUM_ARG_REGS))
641 return NULL_RTX;
642
643 return gen_rtx_REG (mode, *cum + LM32_FIRST_ARG_REG);
644 }
645
/* Implement TARGET_FUNCTION_ARG_ADVANCE: step CUM past an argument of
   the given MODE and TYPE by the number of registers it occupies.  */
static void
lm32_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  *cum += LM32_NUM_REGS2 (mode, type);
}
652
653 HOST_WIDE_INT
654 lm32_compute_initial_elimination_offset (int from, int to)
655 {
656 HOST_WIDE_INT offset = 0;
657
658 switch (from)
659 {
660 case ARG_POINTER_REGNUM:
661 switch (to)
662 {
663 case FRAME_POINTER_REGNUM:
664 offset = 0;
665 break;
666 case STACK_POINTER_REGNUM:
667 offset =
668 lm32_compute_frame_size (get_frame_size ()) -
669 current_frame_info.pretend_size;
670 break;
671 default:
672 gcc_unreachable ();
673 }
674 break;
675 default:
676 gcc_unreachable ();
677 }
678
679 return offset;
680 }
681
/* Implement TARGET_SETUP_INCOMING_VARARGS: spill any argument registers
   that may hold anonymous (variadic) arguments to the stack so va_arg
   can walk them.  CUM/MODE/TYPE describe the last named argument;
   PRETEND_SIZE receives the bytes spilled; NO_RTL suppresses emission.  */
static void
lm32_setup_incoming_varargs (CUMULATIVE_ARGS * cum, enum machine_mode mode,
			     tree type, int *pretend_size, int no_rtl)
{
  int first_anon_arg;
  tree fntype;

  fntype = TREE_TYPE (current_function_decl);

  if (stdarg_p (fntype))
    first_anon_arg = *cum + LM32_FIRST_ARG_REG;
  else
    {
      /* this is the common case, we have been passed details setup
	 for the last named argument, we want to skip over the
	 registers, if any used in passing this named parameter in
	 order to determine which is the first registers used to pass
	 anonymous arguments.  */
      int size;

      if (mode == BLKmode)
	size = int_size_in_bytes (type);
      else
	size = GET_MODE_SIZE (mode);

      first_anon_arg =
	*cum + LM32_FIRST_ARG_REG +
	((size + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
    }

  if ((first_anon_arg < (LM32_FIRST_ARG_REG + LM32_NUM_ARG_REGS)) && !no_rtl)
    {
      int first_reg_offset = first_anon_arg;
      /* size here is a register count, not bytes.  */
      int size = LM32_FIRST_ARG_REG + LM32_NUM_ARG_REGS - first_anon_arg;
      rtx regblock;

      regblock = gen_rtx_MEM (BLKmode,
			      plus_constant (arg_pointer_rtx,
					     FIRST_PARM_OFFSET (0)));
      move_block_from_reg (first_reg_offset, regblock, size);

      *pretend_size = size * UNITS_PER_WORD;
    }
}
726
727 /* Override command line options. */
728 static void
729 lm32_option_override (void)
730 {
731 /* We must have sign-extend enabled if barrel-shift isn't. */
732 if (!TARGET_BARREL_SHIFT_ENABLED && !TARGET_SIGN_EXTEND_ENABLED)
733 target_flags |= MASK_SIGN_EXTEND_ENABLED;
734 }
735
736 /* Return nonzero if this function is known to have a null epilogue.
737 This allows the optimizer to omit jumps to jumps if no stack
738 was created. */
739 int
740 lm32_can_use_return (void)
741 {
742 if (!reload_completed)
743 return 0;
744
745 if (df_regs_ever_live_p (RA_REGNUM) || crtl->profile)
746 return 0;
747
748 if (lm32_compute_frame_size (get_frame_size ()) != 0)
749 return 0;
750
751 return 1;
752 }
753
754 /* Support function to determine the return address of the function
755 'count' frames back up the stack. */
756 rtx
757 lm32_return_addr_rtx (int count, rtx frame)
758 {
759 rtx r;
760 if (count == 0)
761 {
762 if (!df_regs_ever_live_p (RA_REGNUM))
763 r = gen_rtx_REG (Pmode, RA_REGNUM);
764 else
765 {
766 r = gen_rtx_MEM (Pmode,
767 gen_rtx_PLUS (Pmode, frame,
768 GEN_INT (-2 * UNITS_PER_WORD)));
769 set_mem_alias_set (r, get_frame_alias_set ());
770 }
771 }
772 else if (flag_omit_frame_pointer)
773 r = NULL_RTX;
774 else
775 {
776 r = gen_rtx_MEM (Pmode,
777 gen_rtx_PLUS (Pmode, frame,
778 GEN_INT (-2 * UNITS_PER_WORD)));
779 set_mem_alias_set (r, get_frame_alias_set ());
780 }
781 return r;
782 }
783
784 /* Return true if EXP should be placed in the small data section. */
785
786 static bool
787 lm32_in_small_data_p (const_tree exp)
788 {
789 /* We want to merge strings, so we never consider them small data. */
790 if (TREE_CODE (exp) == STRING_CST)
791 return false;
792
793 /* Functions are never in the small data area. Duh. */
794 if (TREE_CODE (exp) == FUNCTION_DECL)
795 return false;
796
797 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
798 {
799 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
800 if (strcmp (section, ".sdata") == 0 || strcmp (section, ".sbss") == 0)
801 return true;
802 }
803 else
804 {
805 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
806
807 /* If this is an incomplete type with size 0, then we can't put it
808 in sdata because it might be too big when completed. */
809 if (size > 0 && size <= g_switch_value)
810 return true;
811 }
812
813 return false;
814 }
815
/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
   Assume that the areas do not overlap.  ALIGNMENT (in bytes) picks the
   widest chunk size that can be used; a trailing remainder is handled
   by move_by_pieces.  */

static void
lm32_block_move_inline (rtx dest, rtx src, HOST_WIDE_INT length,
			HOST_WIDE_INT alignment)
{
  HOST_WIDE_INT offset, delta;
  unsigned HOST_WIDE_INT bits;
  int i;
  enum machine_mode mode;
  rtx *regs;

  /* Work out how many bits to move at a time.  */
  switch (alignment)
    {
    case 1:
      bits = 8;
      break;
    case 2:
      bits = 16;
      break;
    default:
      bits = 32;
      break;
    }

  mode = mode_for_size (bits, MODE_INT, 0);
  delta = bits / BITS_PER_UNIT;

  /* Allocate a buffer for the temporary registers.  */
  regs = XALLOCAVEC (rtx, length / delta);

  /* Load as many BITS-sized chunks as possible.  All loads precede all
     stores so the two passes never interleave.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    {
      regs[i] = gen_reg_rtx (mode);
      emit_move_insn (regs[i], adjust_address (src, mode, offset));
    }

  /* Copy the chunks to the destination.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    emit_move_insn (adjust_address (dest, mode, offset), regs[i]);

  /* Mop up any left-over bytes.  */
  if (offset < length)
    {
      src = adjust_address (src, BLKmode, offset);
      dest = adjust_address (dest, BLKmode, offset);
      move_by_pieces (dest, src, length - offset,
		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
    }
}
869
870 /* Expand string/block move operations.
871
872 operands[0] is the pointer to the destination.
873 operands[1] is the pointer to the source.
874 operands[2] is the number of bytes to move.
875 operands[3] is the alignment. */
876
877 int
878 lm32_expand_block_move (rtx * operands)
879 {
880 if ((GET_CODE (operands[2]) == CONST_INT) && (INTVAL (operands[2]) <= 32))
881 {
882 lm32_block_move_inline (operands[0], operands[1], INTVAL (operands[2]),
883 INTVAL (operands[3]));
884 return 1;
885 }
886 return 0;
887 }
888
889 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
890 isn't protected by a PIC unspec. */
891 int
892 nonpic_symbol_mentioned_p (rtx x)
893 {
894 const char *fmt;
895 int i;
896
897 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
898 || GET_CODE (x) == PC)
899 return 1;
900
901 /* We don't want to look into the possible MEM location of a
902 CONST_DOUBLE, since we're not going to use it, in general. */
903 if (GET_CODE (x) == CONST_DOUBLE)
904 return 0;
905
906 if (GET_CODE (x) == UNSPEC)
907 return 0;
908
909 fmt = GET_RTX_FORMAT (GET_CODE (x));
910 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
911 {
912 if (fmt[i] == 'E')
913 {
914 int j;
915
916 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
917 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
918 return 1;
919 }
920 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
921 return 1;
922 }
923
924 return 0;
925 }
926
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.
   SPEED selects optimize-for-speed (latency-based) versus
   optimize-for-size (instruction-count) costing.  */

static bool
lm32_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool small_mode;

  /* Pipeline latencies, in cycles, used for the speed metric.  */
  const int arithmetic_latency = 1;
  const int shift_latency = 1;
  const int compare_latency = 2;
  const int multiply_latency = 3;
  const int load_latency = 3;
  const int libcall_size_cost = 5;

  /* Determine if we can handle the given mode size in a single instruction.  */
  small_mode = (mode == QImode) || (mode == HImode) || (mode == SImode);

  switch (code)
    {

    case PLUS:
    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
      /* Wide modes need one instruction per word.  */
      if (!speed)
	*total = COSTS_N_INSNS (LM32_NUM_REGS (mode));
      else
	*total =
	  COSTS_N_INSNS (arithmetic_latency + (LM32_NUM_REGS (mode) - 1));
      break;

    case COMPARE:
      if (small_mode)
	{
	  if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else
	    *total = COSTS_N_INSNS (compare_latency);
	}
      else
	{
	  /* FIXME. Guessing here.  */
	  *total = COSTS_N_INSNS (LM32_NUM_REGS (mode) * (2 + 3) / 2);
	}
      break;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TARGET_BARREL_SHIFT_ENABLED && small_mode)
	{
	  if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else
	    *total = COSTS_N_INSNS (shift_latency);
	}
      else if (TARGET_BARREL_SHIFT_ENABLED)
	{
	  /* FIXME: Guessing here.  */
	  *total = COSTS_N_INSNS (LM32_NUM_REGS (mode) * 4);
	}
      else if (small_mode && GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  /* Without a barrel shifter, a constant shift is a sequence
	     of single-bit shifts: one insn per shifted bit.  */
	  *total = COSTS_N_INSNS (INTVAL (XEXP (x, 1)));
	}
      else
	{
	  /* Libcall.  */
	  if (!speed)
	    *total = COSTS_N_INSNS (libcall_size_cost);
	  else
	    *total = COSTS_N_INSNS (100);
	}
      break;

    case MULT:
      if (TARGET_MULTIPLY_ENABLED && small_mode)
	{
	  if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else
	    *total = COSTS_N_INSNS (multiply_latency);
	}
      else
	{
	  /* Libcall.  */
	  if (!speed)
	    *total = COSTS_N_INSNS (libcall_size_cost);
	  else
	    *total = COSTS_N_INSNS (100);
	}
      break;

    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (TARGET_DIVIDE_ENABLED && small_mode)
	{
	  if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else
	    {
	      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
		{
		  /* Estimate cycles from the magnitude of the divisor:
		     two bits are consumed per iteration.  */
		  int cycles = 0;
		  unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));

		  while (i)
		    {
		      i >>= 2;
		      cycles++;
		    }
		  /* NOTE(review): at this point i is always 0 (the loop
		     above exits only when i == 0), so this IN_RANGE test
		     is always true and the else branch is dead.  It was
		     presumably meant to test the original INTVAL —
		     confirm intent before changing.  */
		  if (IN_RANGE (i, 0, 65536))
		    *total = COSTS_N_INSNS (1 + 1 + cycles);
		  else
		    *total = COSTS_N_INSNS (2 + 1 + cycles);
		  return true;
		}
	      else if (GET_CODE (XEXP (x, 1)) == REG)
		{
		  *total = COSTS_N_INSNS (1 + GET_MODE_SIZE (mode) / 2);
		  return true;
		}
	      else
		{
		  *total = COSTS_N_INSNS (1 + GET_MODE_SIZE (mode) / 2);
		  return false;
		}
	    }
	}
      else
	{
	  /* Libcall.  */
	  if (!speed)
	    *total = COSTS_N_INSNS (libcall_size_cost);
	  else
	    *total = COSTS_N_INSNS (100);
	}
      break;

    case HIGH:
    case LO_SUM:
      if (!speed)
	*total = COSTS_N_INSNS (1);
      else
	*total = COSTS_N_INSNS (arithmetic_latency);
      break;

    case ZERO_EXTEND:
      /* Zero extension of a load is free (the load insn does it).  */
      if (MEM_P (XEXP (x, 0)))
	*total = COSTS_N_INSNS (0);
      else if (small_mode)
	{
	  if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else
	    *total = COSTS_N_INSNS (arithmetic_latency);
	}
      else
	*total = COSTS_N_INSNS (LM32_NUM_REGS (mode) / 2);
      break;

    case CONST_INT:
      {
	/* Cost of a constant depends on where it is used: constants
	   that fit the relevant immediate field (L = unsigned 16-bit
	   for logical ops, K = signed 16-bit for arithmetic) are free;
	   others need a two-insn load.  */
	switch (outer_code)
	  {
	  case HIGH:
	  case LO_SUM:
	    *total = COSTS_N_INSNS (0);
	    return true;

	  case AND:
	  case XOR:
	  case IOR:
	  case ASHIFT:
	  case ASHIFTRT:
	  case LSHIFTRT:
	  case ROTATE:
	  case ROTATERT:
	    if (satisfies_constraint_L (x))
	      *total = COSTS_N_INSNS (0);
	    else
	      *total = COSTS_N_INSNS (2);
	    return true;

	  case SET:
	  case PLUS:
	  case MINUS:
	  case COMPARE:
	    if (satisfies_constraint_K (x))
	      *total = COSTS_N_INSNS (0);
	    else
	      *total = COSTS_N_INSNS (2);
	    return true;

	  case MULT:
	    if (TARGET_MULTIPLY_ENABLED)
	      {
		if (satisfies_constraint_K (x))
		  *total = COSTS_N_INSNS (0);
		else
		  *total = COSTS_N_INSNS (2);
		return true;
	      }
	    /* Fall through.  */

	  default:
	    if (satisfies_constraint_K (x))
	      *total = COSTS_N_INSNS (1);
	    else
	      *total = COSTS_N_INSNS (2);
	    return true;
	  }
      }

    case SYMBOL_REF:
    case CONST:
      switch (outer_code)
	{
	case HIGH:
	case LO_SUM:
	  *total = COSTS_N_INSNS (0);
	  return true;

	case MEM:
	case SET:
	  /* With small data enabled, symbols may be gp-relative and
	     therefore free.  */
	  if (g_switch_value)
	    {
	      *total = COSTS_N_INSNS (0);
	      return true;
	    }
	  break;
	}
      /* Fall through.  */

    case LABEL_REF:
    case CONST_DOUBLE:
      *total = COSTS_N_INSNS (2);
      return true;

    case SET:
      *total = COSTS_N_INSNS (1);
      break;

    case MEM:
      if (!speed)
	*total = COSTS_N_INSNS (1);
      else
	*total = COSTS_N_INSNS (load_latency);
      break;

    }

  return false;
}
1189
1190 /* Implemenent TARGET_CAN_ELIMINATE. */
1191
1192 bool
1193 lm32_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
1194 {
1195 return (to == STACK_POINTER_REGNUM && frame_pointer_needed) ? false : true;
1196 }
1197
1198 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
1199
1200 static bool
1201 lm32_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
1202 {
1203 /* (rM) */
1204 if (strict && REG_P (x) && STRICT_REG_OK_FOR_BASE_P (x))
1205 return true;
1206 if (!strict && REG_P (x) && NONSTRICT_REG_OK_FOR_BASE_P (x))
1207 return true;
1208
1209 /* (rM)+literal) */
1210 if (GET_CODE (x) == PLUS
1211 && REG_P (XEXP (x, 0))
1212 && ((strict && STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0)))
1213 || (!strict && NONSTRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))))
1214 && GET_CODE (XEXP (x, 1)) == CONST_INT
1215 && satisfies_constraint_K (XEXP ((x), 1)))
1216 return true;
1217
1218 /* gp(sym) */
1219 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_SMALL_P (x))
1220 return true;
1221
1222 return false;
1223 }
1224
1225 /* Check a move is not memory to memory. */
1226
1227 bool
1228 lm32_move_ok (enum machine_mode mode, rtx operands[2]) {
1229 if (memory_operand (operands[0], mode))
1230 return register_or_zero_operand (operands[1], mode);
1231 return true;
1232 }
1233
1234 /* Implement LEGITIMATE_CONSTANT_P. */
1235
1236 bool
1237 lm32_legitimate_constant_p (rtx x)
1238 {
1239 /* 32-bit addresses require multiple instructions. */
1240 if (!flag_pic && reloc_operand (x, GET_MODE (x)))
1241 return false;
1242
1243 return true;
1244 }