1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* ------------------------------------------------------------------------ */
25 #include "coretypes.h"
30 #include "stringpool.h"
35 #include "optabs.h" /* For GEN_FCN. */
39 #include "diagnostic-core.h"
40 #include "stor-layout.h"
46 #include "tm-constrs.h"
49 /* This file should be included last. */
50 #include "target-def.h"
52 /* ------------------------------------------------------------------------ */
54 /* This file is divided into five parts:
56 PART 1: Auxiliary static variable definitions and
57 target hook static variable definitions.
59 PART 2: Auxiliary static function definitions.
61 PART 3: Implement target hook stuff definitions.
63      PART 4: Implement extern function definitions,
64              the prototype is in nds32-protos.h.
66 PART 5: Initialize target hook structure and definitions. */
68 /* ------------------------------------------------------------------------ */
70 /* PART 1: Auxiliary static variable definitions and
71 target hook static variable definitions. */
/* Define intrinsic register names.
   Please refer to nds32_intrinsic.h file, the index is corresponding to
   'enum nds32_intrinsic_registers' data type values.
   NOTE that the base value starting from 1024.  */
static const char * const nds32_intrinsic_register_names[] =
{
  "$PSW", "$IPSW", "$ITYPE", "$IPC"
};
82 /* Defining target-specific uses of __attribute__. */
83 static const struct attribute_spec nds32_attribute_table
[] =
85 /* Syntax: { name, min_len, max_len, decl_required, type_required,
86 function_type_required, handler, affects_type_identity } */
88 /* The interrupt vid: [0-63]+ (actual vector number starts from 9 to 72). */
89 { "interrupt", 1, 64, false, false, false, NULL
, false },
90 /* The exception vid: [1-8]+ (actual vector number starts from 1 to 8). */
91 { "exception", 1, 8, false, false, false, NULL
, false },
92 /* Argument is user's interrupt numbers. The vector number is always 0. */
93 { "reset", 1, 1, false, false, false, NULL
, false },
95 /* The attributes describing isr nested type. */
96 { "nested", 0, 0, false, false, false, NULL
, false },
97 { "not_nested", 0, 0, false, false, false, NULL
, false },
98 { "nested_ready", 0, 0, false, false, false, NULL
, false },
100 /* The attributes describing isr register save scheme. */
101 { "save_all", 0, 0, false, false, false, NULL
, false },
102 { "partial_save", 0, 0, false, false, false, NULL
, false },
104 /* The attributes used by reset attribute. */
105 { "nmi", 1, 1, false, false, false, NULL
, false },
106 { "warm", 1, 1, false, false, false, NULL
, false },
108 /* The attribute telling no prologue/epilogue. */
109 { "naked", 0, 0, false, false, false, NULL
, false },
111 /* The last attribute spec is set to be NULL. */
112 { NULL
, 0, 0, false, false, false, NULL
, false }
116 /* ------------------------------------------------------------------------ */
118 /* PART 2: Auxiliary static function definitions. */
120 /* Function to save and restore machine-specific function data. */
121 static struct machine_function
*
122 nds32_init_machine_status (void)
124 struct machine_function
*machine
;
125 machine
= ggc_cleared_alloc
<machine_function
> ();
127 /* Initially assume this function needs prologue/epilogue. */
128 machine
->naked_p
= 0;
130 /* Initially assume this function does NOT use fp_as_gp optimization. */
131 machine
->fp_as_gp_p
= 0;
136 /* Function to compute stack frame size and
137 store into cfun->machine structure. */
139 nds32_compute_stack_frame (void)
144 /* Because nds32_compute_stack_frame() will be called from different place,
145 everytime we enter this function, we have to assume this function
146 needs prologue/epilogue. */
147 cfun
->machine
->naked_p
= 0;
149 /* Get variadic arguments size to prepare pretend arguments and
150 we will push them into stack at prologue by ourself. */
151 cfun
->machine
->va_args_size
= crtl
->args
.pretend_args_size
;
152 if (cfun
->machine
->va_args_size
!= 0)
154 cfun
->machine
->va_args_first_regno
155 = NDS32_GPR_ARG_FIRST_REGNUM
156 + NDS32_MAX_GPR_REGS_FOR_ARGS
157 - (crtl
->args
.pretend_args_size
/ UNITS_PER_WORD
);
158 cfun
->machine
->va_args_last_regno
159 = NDS32_GPR_ARG_FIRST_REGNUM
+ NDS32_MAX_GPR_REGS_FOR_ARGS
- 1;
163 cfun
->machine
->va_args_first_regno
= SP_REGNUM
;
164 cfun
->machine
->va_args_last_regno
= SP_REGNUM
;
167 /* Important: We need to make sure that varargs area is 8-byte alignment. */
168 block_size
= cfun
->machine
->va_args_size
;
169 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size
))
171 cfun
->machine
->va_args_area_padding_bytes
172 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
) - block_size
;
175 /* Get local variables, incoming variables, and temporary variables size.
176 Note that we need to make sure it is 8-byte alignment because
177 there may be no padding bytes if we are using LRA. */
178 cfun
->machine
->local_size
= NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
180 /* Get outgoing arguments size. */
181 cfun
->machine
->out_args_size
= crtl
->outgoing_args_size
;
183 /* If $fp value is required to be saved on stack, it needs 4 bytes space.
184 Check whether $fp is ever live. */
185 cfun
->machine
->fp_size
= (df_regs_ever_live_p (FP_REGNUM
)) ? 4 : 0;
187 /* If $gp value is required to be saved on stack, it needs 4 bytes space.
188 Check whether we are using PIC code genration. */
189 cfun
->machine
->gp_size
= (flag_pic
) ? 4 : 0;
191 /* If $lp value is required to be saved on stack, it needs 4 bytes space.
192 Check whether $lp is ever live. */
193 cfun
->machine
->lp_size
= (df_regs_ever_live_p (LP_REGNUM
)) ? 4 : 0;
195 /* Initially there is no padding bytes. */
196 cfun
->machine
->callee_saved_area_gpr_padding_bytes
= 0;
198 /* Calculate the bytes of saving callee-saved registers on stack. */
199 cfun
->machine
->callee_saved_gpr_regs_size
= 0;
200 cfun
->machine
->callee_saved_first_gpr_regno
= SP_REGNUM
;
201 cfun
->machine
->callee_saved_last_gpr_regno
= SP_REGNUM
;
202 /* Currently, there is no need to check $r28~$r31
203 because we will save them in another way. */
204 for (r
= 0; r
< 28; r
++)
206 if (NDS32_REQUIRED_CALLEE_SAVED_P (r
))
208 /* Mark the first required callee-saved register
209 (only need to set it once).
210 If first regno == SP_REGNUM, we can tell that
211 it is the first time to be here. */
212 if (cfun
->machine
->callee_saved_first_gpr_regno
== SP_REGNUM
)
213 cfun
->machine
->callee_saved_first_gpr_regno
= r
;
214 /* Mark the last required callee-saved register. */
215 cfun
->machine
->callee_saved_last_gpr_regno
= r
;
219 /* Check if this function can omit prologue/epilogue code fragment.
220 If there is 'naked' attribute in this function,
221 we can set 'naked_p' flag to indicate that
222 we do not have to generate prologue/epilogue.
223 Or, if all the following conditions succeed,
224 we can set this function 'naked_p' as well:
225 condition 1: first_regno == last_regno == SP_REGNUM,
226 which means we do not have to save
227 any callee-saved registers.
228 condition 2: Both $lp and $fp are NOT live in this function,
229 which means we do not need to save them and there
231 condition 3: There is no local_size, which means
232 we do not need to adjust $sp. */
233 if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl
))
234 || (cfun
->machine
->callee_saved_first_gpr_regno
== SP_REGNUM
235 && cfun
->machine
->callee_saved_last_gpr_regno
== SP_REGNUM
236 && !df_regs_ever_live_p (FP_REGNUM
)
237 && !df_regs_ever_live_p (LP_REGNUM
)
238 && cfun
->machine
->local_size
== 0))
240 /* Set this function 'naked_p' and other functions can check this flag.
241 Note that in nds32 port, the 'naked_p = 1' JUST means there is no
242 callee-saved, local size, and outgoing size.
243 The varargs space and ret instruction may still present in
244 the prologue/epilogue expanding. */
245 cfun
->machine
->naked_p
= 1;
247 /* No need to save $fp, $gp, and $lp.
248 We should set these value to be zero
249 so that nds32_initial_elimination_offset() can work properly. */
250 cfun
->machine
->fp_size
= 0;
251 cfun
->machine
->gp_size
= 0;
252 cfun
->machine
->lp_size
= 0;
254 /* If stack usage computation is required,
255 we need to provide the static stack size. */
256 if (flag_stack_usage_info
)
257 current_function_static_stack_size
= 0;
259 /* No need to do following adjustment, return immediately. */
263 /* Adjustment for v3push instructions:
264 If we are using v3push (push25/pop25) instructions,
265 we need to make sure Rb is $r6 and Re is
266 located on $r6, $r8, $r10, or $r14.
267 Some results above will be discarded and recomputed.
268 Note that it is only available under V3/V3M ISA and we
269 DO NOT setup following stuff for isr or variadic function. */
271 && !nds32_isr_function_p (current_function_decl
)
272 && (cfun
->machine
->va_args_size
== 0))
275 cfun->machine->fp_size
276 cfun->machine->gp_size
277 cfun->machine->lp_size
278 cfun->machine->callee_saved_first_gpr_regno
279 cfun->machine->callee_saved_last_gpr_regno */
281 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
282 cfun
->machine
->fp_size
= 4;
283 cfun
->machine
->gp_size
= 4;
284 cfun
->machine
->lp_size
= 4;
286 /* Remember to set Rb = $r6. */
287 cfun
->machine
->callee_saved_first_gpr_regno
= 6;
289 if (cfun
->machine
->callee_saved_last_gpr_regno
<= 6)
292 cfun
->machine
->callee_saved_last_gpr_regno
= 6;
294 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 8)
297 cfun
->machine
->callee_saved_last_gpr_regno
= 8;
299 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 10)
302 cfun
->machine
->callee_saved_last_gpr_regno
= 10;
304 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 14)
307 cfun
->machine
->callee_saved_last_gpr_regno
= 14;
309 else if (cfun
->machine
->callee_saved_last_gpr_regno
== SP_REGNUM
)
311 /* If last_regno is SP_REGNUM, which means
312 it is never changed, so set it to Re = $r6. */
313 cfun
->machine
->callee_saved_last_gpr_regno
= 6;
317 /* The program flow should not go here. */
322 /* We have correctly set callee_saved_first_gpr_regno
323 and callee_saved_last_gpr_regno.
324 Initially, the callee_saved_gpr_regs_size is supposed to be 0.
325 As long as callee_saved_last_gpr_regno is not SP_REGNUM,
326 we can update callee_saved_gpr_regs_size with new size. */
327 if (cfun
->machine
->callee_saved_last_gpr_regno
!= SP_REGNUM
)
329 /* Compute pushed size of callee-saved registers. */
330 cfun
->machine
->callee_saved_gpr_regs_size
331 = 4 * (cfun
->machine
->callee_saved_last_gpr_regno
332 - cfun
->machine
->callee_saved_first_gpr_regno
336 /* Important: We need to make sure that
337 (fp_size + gp_size + lp_size + callee_saved_gpr_regs_size)
339 If it is not, calculate the padding bytes. */
340 block_size
= cfun
->machine
->fp_size
341 + cfun
->machine
->gp_size
342 + cfun
->machine
->lp_size
343 + cfun
->machine
->callee_saved_gpr_regs_size
;
344 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size
))
346 cfun
->machine
->callee_saved_area_gpr_padding_bytes
347 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
) - block_size
;
350 /* If stack usage computation is required,
351 we need to provide the static stack size. */
352 if (flag_stack_usage_info
)
354 current_function_static_stack_size
355 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
)
356 + cfun
->machine
->local_size
357 + cfun
->machine
->out_args_size
;
361 /* Function to create a parallel rtx pattern
362 which presents stack push multiple behavior.
363 The overall concept are:
364 "push registers to memory",
365 "adjust stack pointer". */
367 nds32_emit_stack_push_multiple (rtx Rb
, rtx Re
, rtx En4
, bool vaarg_p
)
374 int save_fp
, save_gp
, save_lp
;
383 /* We need to provide a customized rtx which contains
384 necessary information for data analysis,
385 so we create a parallel rtx like this:
386 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
388 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
391 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
393 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
395 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
397 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
399 (set (reg:SI SP_REGNUM)
400 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
402 /* Determine whether we need to save $fp, $gp, or $lp. */
403 save_fp
= INTVAL (En4
) & 0x8;
404 save_gp
= INTVAL (En4
) & 0x4;
405 save_lp
= INTVAL (En4
) & 0x2;
407 /* Calculate the number of registers that will be pushed. */
415 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
416 if (REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
)
417 num_use_regs
= extra_count
;
419 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + extra_count
;
421 /* In addition to used registers,
422 we need one more space for (set sp sp-x) rtx. */
423 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
424 rtvec_alloc (num_use_regs
+ 1));
427 /* Initialize offset and start to create push behavior. */
428 offset
= -(num_use_regs
* 4);
430 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
431 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
433 /* Rb and Re may be SP_REGNUM.
434 We need to break this loop immediately. */
435 if (regno
== SP_REGNUM
)
438 reg
= gen_rtx_REG (SImode
, regno
);
439 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
442 push_rtx
= gen_rtx_SET (mem
, reg
);
443 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
444 RTX_FRAME_RELATED_P (push_rtx
) = 1;
449 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
452 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
453 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
456 push_rtx
= gen_rtx_SET (mem
, reg
);
457 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
458 RTX_FRAME_RELATED_P (push_rtx
) = 1;
464 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
465 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
468 push_rtx
= gen_rtx_SET (mem
, reg
);
469 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
470 RTX_FRAME_RELATED_P (push_rtx
) = 1;
476 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
477 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
480 push_rtx
= gen_rtx_SET (mem
, reg
);
481 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
482 RTX_FRAME_RELATED_P (push_rtx
) = 1;
487 /* Create (set sp sp-x). */
489 /* We need to re-calculate the offset value again for adjustment. */
490 offset
= -(num_use_regs
* 4);
492 = gen_rtx_SET (stack_pointer_rtx
,
493 plus_constant (Pmode
, stack_pointer_rtx
, offset
));
494 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
495 RTX_FRAME_RELATED_P (adjust_sp_rtx
) = 1;
497 parallel_insn
= emit_insn (parallel_insn
);
499 /* The insn rtx 'parallel_insn' will change frame layout.
500 We need to use RTX_FRAME_RELATED_P so that GCC is able to
501 generate CFI (Call Frame Information) stuff. */
502 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
504 /* Don't use GCC's logic for CFI info if we are generate a push for VAARG
505 since we will not restore those register at epilogue. */
508 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
,
509 copy_rtx (adjust_sp_rtx
), NULL_RTX
);
510 REG_NOTES (parallel_insn
) = dwarf
;
514 /* Function to create a parallel rtx pattern
515 which presents stack pop multiple behavior.
516 The overall concept are:
517 "pop registers from memory",
518 "adjust stack pointer". */
520 nds32_emit_stack_pop_multiple (rtx Rb
, rtx Re
, rtx En4
)
527 int save_fp
, save_gp
, save_lp
;
534 rtx dwarf
= NULL_RTX
;
536 /* We need to provide a customized rtx which contains
537 necessary information for data analysis,
538 so we create a parallel rtx like this:
539 (parallel [(set (reg:SI Rb)
540 (mem (reg:SI SP_REGNUM)))
542 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
545 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
546 (set (reg:SI FP_REGNUM)
547 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
548 (set (reg:SI GP_REGNUM)
549 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
550 (set (reg:SI LP_REGNUM)
551 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
552 (set (reg:SI SP_REGNUM)
553 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
555 /* Determine whether we need to restore $fp, $gp, or $lp. */
556 save_fp
= INTVAL (En4
) & 0x8;
557 save_gp
= INTVAL (En4
) & 0x4;
558 save_lp
= INTVAL (En4
) & 0x2;
560 /* Calculate the number of registers that will be poped. */
568 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
569 if (REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
)
570 num_use_regs
= extra_count
;
572 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + extra_count
;
574 /* In addition to used registers,
575 we need one more space for (set sp sp+x) rtx. */
576 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
577 rtvec_alloc (num_use_regs
+ 1));
580 /* Initialize offset and start to create pop behavior. */
583 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
584 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
586 /* Rb and Re may be SP_REGNUM.
587 We need to break this loop immediately. */
588 if (regno
== SP_REGNUM
)
591 reg
= gen_rtx_REG (SImode
, regno
);
592 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
595 pop_rtx
= gen_rtx_SET (reg
, mem
);
596 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
597 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
601 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
604 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
607 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
608 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
611 pop_rtx
= gen_rtx_SET (reg
, mem
);
612 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
613 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
617 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
621 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
622 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
625 pop_rtx
= gen_rtx_SET (reg
, mem
);
626 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
627 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
631 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
635 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
636 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
639 pop_rtx
= gen_rtx_SET (reg
, mem
);
640 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
641 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
645 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
648 /* Create (set sp sp+x). */
650 /* The offset value is already in place. No need to re-calculate it. */
652 = gen_rtx_SET (stack_pointer_rtx
,
653 plus_constant (Pmode
, stack_pointer_rtx
, offset
));
654 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
656 /* Tell gcc we adjust SP in this insn. */
657 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, copy_rtx (adjust_sp_rtx
), dwarf
);
659 parallel_insn
= emit_insn (parallel_insn
);
661 /* The insn rtx 'parallel_insn' will change frame layout.
662 We need to use RTX_FRAME_RELATED_P so that GCC is able to
663 generate CFI (Call Frame Information) stuff. */
664 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
666 /* Add CFI info by manual. */
667 REG_NOTES (parallel_insn
) = dwarf
;
670 /* Function to create a parallel rtx pattern
671 which presents stack v3push behavior.
672 The overall concept are:
673 "push registers to memory",
674 "adjust stack pointer". */
676 nds32_emit_stack_v3push (rtx Rb
,
678 rtx En4 ATTRIBUTE_UNUSED
,
692 /* We need to provide a customized rtx which contains
693 necessary information for data analysis,
694 so we create a parallel rtx like this:
695 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
697 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
700 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
702 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
704 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
706 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
708 (set (reg:SI SP_REGNUM)
709 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
711 /* Calculate the number of registers that will be pushed.
712 Since $fp, $gp, and $lp is always pushed with v3push instruction,
713 we need to count these three registers.
714 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
715 So there is no need to worry about Rb=Re=SP_REGNUM case. */
716 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + 3;
718 /* In addition to used registers,
719 we need one more space for (set sp sp-x-imm8u) rtx. */
720 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
721 rtvec_alloc (num_use_regs
+ 1));
724 /* Initialize offset and start to create push behavior. */
725 offset
= -(num_use_regs
* 4);
727 /* Create (set mem regX) from Rb, Rb+1 up to Re.
728 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
729 So there is no need to worry about Rb=Re=SP_REGNUM case. */
730 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
732 reg
= gen_rtx_REG (SImode
, regno
);
733 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
736 push_rtx
= gen_rtx_SET (mem
, reg
);
737 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
738 RTX_FRAME_RELATED_P (push_rtx
) = 1;
743 /* Create (set mem fp). */
744 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
745 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
748 push_rtx
= gen_rtx_SET (mem
, reg
);
749 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
750 RTX_FRAME_RELATED_P (push_rtx
) = 1;
753 /* Create (set mem gp). */
754 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
755 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
758 push_rtx
= gen_rtx_SET (mem
, reg
);
759 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
760 RTX_FRAME_RELATED_P (push_rtx
) = 1;
763 /* Create (set mem lp). */
764 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
765 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
768 push_rtx
= gen_rtx_SET (mem
, reg
);
769 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
770 RTX_FRAME_RELATED_P (push_rtx
) = 1;
774 /* Create (set sp sp-x-imm8u). */
776 /* We need to re-calculate the offset value again for adjustment. */
777 offset
= -(num_use_regs
* 4);
779 = gen_rtx_SET (stack_pointer_rtx
,
780 plus_constant (Pmode
,
782 offset
- INTVAL (imm8u
)));
783 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
784 RTX_FRAME_RELATED_P (adjust_sp_rtx
) = 1;
786 parallel_insn
= emit_insn (parallel_insn
);
788 /* The insn rtx 'parallel_insn' will change frame layout.
789 We need to use RTX_FRAME_RELATED_P so that GCC is able to
790 generate CFI (Call Frame Information) stuff. */
791 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
794 /* Function to create a parallel rtx pattern
795 which presents stack v3pop behavior.
796 The overall concept are:
797 "pop registers from memory",
798 "adjust stack pointer". */
800 nds32_emit_stack_v3pop (rtx Rb
,
802 rtx En4 ATTRIBUTE_UNUSED
,
815 rtx dwarf
= NULL_RTX
;
817 /* We need to provide a customized rtx which contains
818 necessary information for data analysis,
819 so we create a parallel rtx like this:
820 (parallel [(set (reg:SI Rb)
821 (mem (reg:SI SP_REGNUM)))
823 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
826 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
827 (set (reg:SI FP_REGNUM)
828 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
829 (set (reg:SI GP_REGNUM)
830 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
831 (set (reg:SI LP_REGNUM)
832 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
833 (set (reg:SI SP_REGNUM)
834 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
836 /* Calculate the number of registers that will be poped.
837 Since $fp, $gp, and $lp is always poped with v3pop instruction,
838 we need to count these three registers.
839 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
840 So there is no need to worry about Rb=Re=SP_REGNUM case. */
841 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + 3;
843 /* In addition to used registers,
844 we need one more space for (set sp sp+x+imm8u) rtx. */
845 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
846 rtvec_alloc (num_use_regs
+ 1));
849 /* Initialize offset and start to create pop behavior. */
852 /* Create (set regX mem) from Rb, Rb+1 up to Re.
853 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
854 So there is no need to worry about Rb=Re=SP_REGNUM case. */
855 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
857 reg
= gen_rtx_REG (SImode
, regno
);
858 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
861 pop_rtx
= gen_rtx_SET (reg
, mem
);
862 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
863 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
867 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
870 /* Create (set fp mem). */
871 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
872 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
875 pop_rtx
= gen_rtx_SET (reg
, mem
);
876 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
877 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
880 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
882 /* Create (set gp mem). */
883 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
884 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
887 pop_rtx
= gen_rtx_SET (reg
, mem
);
888 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
889 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
892 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
894 /* Create (set lp mem ). */
895 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
896 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
899 pop_rtx
= gen_rtx_SET (reg
, mem
);
900 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
901 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
904 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
906 /* Create (set sp sp+x+imm8u). */
908 /* The offset value is already in place. No need to re-calculate it. */
910 = gen_rtx_SET (stack_pointer_rtx
,
911 plus_constant (Pmode
,
913 offset
+ INTVAL (imm8u
)));
914 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
916 /* Tell gcc we adjust SP in this insn. */
917 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, copy_rtx (adjust_sp_rtx
), dwarf
);
919 parallel_insn
= emit_insn (parallel_insn
);
921 /* The insn rtx 'parallel_insn' will change frame layout.
922 We need to use RTX_FRAME_RELATED_P so that GCC is able to
923 generate CFI (Call Frame Information) stuff. */
924 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
926 /* Add CFI info by manual. */
927 REG_NOTES (parallel_insn
) = dwarf
;
930 /* Function that may creates more instructions
931 for large value on adjusting stack pointer.
933 In nds32 target, 'addi' can be used for stack pointer
934 adjustment in prologue/epilogue stage.
935 However, sometimes there are too many local variables so that
936 the adjustment value is not able to be fit in the 'addi' instruction.
937 One solution is to move value into a register
938 and then use 'add' instruction.
939 In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
940 Also, we need to return zero for sp adjustment so that
941 proglogue/epilogue knows there is no need to create 'addi' instruction. */
943 nds32_force_addi_stack_int (int full_value
)
950 if (!satisfies_constraint_Is15 (GEN_INT (full_value
)))
952 /* The value is not able to fit in single addi instruction.
953 Create more instructions of moving value into a register
954 and then add stack pointer with it. */
956 /* $r15 is going to be temporary register to hold the value. */
957 tmp_reg
= gen_rtx_REG (SImode
, TA_REGNUM
);
959 /* Create one more instruction to move value
960 into the temporary register. */
961 emit_move_insn (tmp_reg
, GEN_INT (full_value
));
963 /* Create new 'add' rtx. */
964 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
967 /* Emit rtx into insn list and receive its transformed insn rtx. */
968 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
970 /* At prologue, we need to tell GCC that this is frame related insn,
971 so that we can consider this instruction to output debug information.
972 If full_value is NEGATIVE, it means this function
973 is invoked by expand_prologue. */
976 /* Because (tmp_reg <- full_value) may be split into two
977 rtl patterns, we can not set its RTX_FRAME_RELATED_P.
978 We need to construct another (sp <- sp + full_value)
979 and then insert it into sp_adjust_insn's reg note to
980 represent a frame related expression.
981 GCC knows how to refer it and output debug information. */
986 plus_rtx
= plus_constant (Pmode
, stack_pointer_rtx
, full_value
);
987 set_rtx
= gen_rtx_SET (stack_pointer_rtx
, plus_rtx
);
988 add_reg_note (sp_adjust_insn
, REG_FRAME_RELATED_EXPR
, set_rtx
);
990 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
993 /* We have used alternative way to adjust stack pointer value.
994 Return zero so that prologue/epilogue
995 will not generate other instructions. */
1000 /* The value is able to fit in addi instruction.
1001 However, remember to make it to be positive value
1002 because we want to return 'adjustment' result. */
1003 adjust_value
= (full_value
< 0) ? (-full_value
) : (full_value
);
1005 return adjust_value
;
1009 /* Return true if MODE/TYPE need double word alignment. */
1011 nds32_needs_double_word_align (machine_mode mode
, const_tree type
)
1015 /* Pick up the alignment according to the mode or type. */
1016 align
= NDS32_MODE_TYPE_ALIGN (mode
, type
);
1018 return (align
> PARM_BOUNDARY
);
1021 /* Return true if FUNC is a naked function. */
1023 nds32_naked_function_p (tree func
)
1027 if (TREE_CODE (func
) != FUNCTION_DECL
)
1030 t
= lookup_attribute ("naked", DECL_ATTRIBUTES (func
));
1032 return (t
!= NULL_TREE
);
1035 /* Function that check if 'X' is a valid address register.
1036 The variable 'STRICT' is very important to
1037 make decision for register number.
1040 => We are in reload pass or after reload pass.
1041 The register number should be strictly limited in general registers.
1044 => Before reload pass, we are free to use any register number. */
1046 nds32_address_register_rtx_p (rtx x
, bool strict
)
1050 if (GET_CODE (x
) != REG
)
1056 return REGNO_OK_FOR_BASE_P (regno
);
1061 /* Function that check if 'INDEX' is valid to be a index rtx for address.
1063 OUTER_MODE : Machine mode of outer address rtx.
1064 INDEX : Check if this rtx is valid to be a index for address.
1065 STRICT : If it is true, we are in reload pass or after reload pass. */
1067 nds32_legitimate_index_p (machine_mode outer_mode
,
1075 switch (GET_CODE (index
))
1078 regno
= REGNO (index
);
1079 /* If we are in reload pass or after reload pass,
1080 we need to limit it to general register. */
1082 return REGNO_OK_FOR_INDEX_P (regno
);
1087 /* The alignment of the integer value is determined by 'outer_mode'. */
1088 if (GET_MODE_SIZE (outer_mode
) == 1)
1090 /* Further check if the value is legal for the 'outer_mode'. */
1091 if (!satisfies_constraint_Is15 (index
))
1094 /* Pass all test, the value is valid, return true. */
1097 if (GET_MODE_SIZE (outer_mode
) == 2
1098 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index
)))
1100 /* Further check if the value is legal for the 'outer_mode'. */
1101 if (!satisfies_constraint_Is16 (index
))
1104 /* Pass all test, the value is valid, return true. */
1107 if (GET_MODE_SIZE (outer_mode
) == 4
1108 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index
)))
1110 /* Further check if the value is legal for the 'outer_mode'. */
1111 if (!satisfies_constraint_Is17 (index
))
1114 /* Pass all test, the value is valid, return true. */
1117 if (GET_MODE_SIZE (outer_mode
) == 8
1118 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index
)))
1120 /* Further check if the value is legal for the 'outer_mode'. */
1121 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index
) + 4,
1125 /* Pass all test, the value is valid, return true. */
1132 op0
= XEXP (index
, 0);
1133 op1
= XEXP (index
, 1);
1135 if (REG_P (op0
) && CONST_INT_P (op1
))
1138 multiplier
= INTVAL (op1
);
1140 /* We only allow (mult reg const_int_1)
1141 or (mult reg const_int_2) or (mult reg const_int_4). */
1142 if (multiplier
!= 1 && multiplier
!= 2 && multiplier
!= 4)
1145 regno
= REGNO (op0
);
1146 /* Limit it in general registers if we are
1147 in reload pass or after reload pass. */
1149 return REGNO_OK_FOR_INDEX_P (regno
);
1157 op0
= XEXP (index
, 0);
1158 op1
= XEXP (index
, 1);
1160 if (REG_P (op0
) && CONST_INT_P (op1
))
1163 /* op1 is already the sv value for use to do left shift. */
1166 /* We only allow (ashift reg const_int_0)
1167 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1168 if (sv
!= 0 && sv
!= 1 && sv
!=2)
1171 regno
= REGNO (op0
);
1172 /* Limit it in general registers if we are
1173 in reload pass or after reload pass. */
1175 return REGNO_OK_FOR_INDEX_P (regno
);
1187 /* ------------------------------------------------------------------------ */
1189 /* PART 3: Implement target hook stuff definitions. */
1191 /* Register Classes. */
1193 static unsigned char
1194 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED
,
1197 /* Return the maximum number of consecutive registers
1198 needed to represent "mode" in a register of "rclass". */
1199 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
1203 nds32_register_priority (int hard_regno
)
1205 /* Encourage to use r0-r7 for LRA when optimize for size. */
1206 if (optimize_size
&& hard_regno
< 8)
/* Stack Layout and Calling Conventions.  */

/* There are three kinds of pointer concepts used in the GCC compiler:

     frame pointer:    A pointer to the first location of local variables.
     stack pointer:    A pointer to the top of a stack frame.
     argument pointer: A pointer to the incoming arguments.

   In the nds32 target calling convention we use 8-byte alignment.
   Each stack frame of a function includes:

     1. previous hard frame pointer
     2. return address
     3. callee-saved registers
     4. <padding bytes> (calculated in nds32_compute_stack_frame()
        and saved in cfun->machine->callee_saved_area_padding_bytes)
     5. local variables
     6. spilling location
     7. <padding bytes> (it will be calculated by GCC itself)
     8. incoming arguments / outgoing arguments
        (<padding bytes> calculated by GCC itself)

   We 'wrap' these blocks together with the
   hard frame pointer ($r28) and stack pointer ($r31).
   By applying the basic frame/stack/argument pointers concept,
   the layout of a stack frame should be like this:

                          |    |
     old stack pointer ->  ----
                          | | | \
                          | | |   saved arguments for
                          | | |   vararg functions
                          | | | /
     hard frame pointer -> --
     & argument pointer   | | | \
                          | | |   previous hardware frame pointer
                          | | |   return address
                          | | |   callee-saved registers
                          | | | /
                          | | | \
                          | | |   local variables
                          | | |   and incoming arguments
                          | | | /
                          | | | \
                          | | |   outgoing arguments
                          | | | /
          stack pointer -> ----

   $SFP and $AP are used to represent frame pointer and arguments pointer,
   which will be both eliminated as hard frame pointer.  */
1274 /* -- Eliminating Frame Pointer and Arg Pointer. */
1277 nds32_can_eliminate (const int from_reg
, const int to_reg
)
1279 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
1282 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== HARD_FRAME_POINTER_REGNUM
)
1285 if (from_reg
== FRAME_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
1288 if (from_reg
== FRAME_POINTER_REGNUM
&& to_reg
== HARD_FRAME_POINTER_REGNUM
)
1294 /* -- Passing Arguments in Registers. */
1297 nds32_function_arg (cumulative_args_t ca
, machine_mode mode
,
1298 const_tree type
, bool named
)
1301 CUMULATIVE_ARGS
*cum
= get_cumulative_args (ca
);
1303 /* The last time this hook is called,
1304 it is called with MODE == VOIDmode. */
1305 if (mode
== VOIDmode
)
1308 /* For nameless arguments, we need to take care it individually. */
1311 /* If we are under hard float abi, we have arguments passed on the
1312 stack and all situation can be handled by GCC itself. */
1313 if (TARGET_HARD_FLOAT
)
1316 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum
->gpr_offset
, mode
, type
))
1318 /* If we still have enough registers to pass argument, pick up
1319 next available register number. */
1321 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
);
1322 return gen_rtx_REG (mode
, regno
);
1325 /* No register available, return NULL_RTX.
1326 The compiler will use stack to pass argument instead. */
1330 /* The following is to handle named argument.
1331 Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
1333 if (TARGET_HARD_FLOAT
)
1335 /* Currently we have not implemented hard float yet. */
1340 /* For !TARGET_HARD_FLOAT calling convention, we always use GPR to pass
1341 argument. Since we allow to pass argument partially in registers,
1342 we can just return it if there are still registers available. */
1343 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum
->gpr_offset
, mode
, type
))
1345 /* Pick up the next available register number. */
1347 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
);
1348 return gen_rtx_REG (mode
, regno
);
1353 /* No register available, return NULL_RTX.
1354 The compiler will use stack to pass argument instead. */
1359 nds32_must_pass_in_stack (machine_mode mode
, const_tree type
)
1361 /* Return true if a type must be passed in memory.
1362 If it is NOT using hard float abi, small aggregates can be
1363 passed in a register even we are calling a variadic function.
1364 So there is no need to take padding into consideration. */
1365 if (TARGET_HARD_FLOAT
)
1366 return must_pass_in_stack_var_size_or_pad (mode
, type
);
1368 return must_pass_in_stack_var_size (mode
, type
);
1372 nds32_arg_partial_bytes (cumulative_args_t ca
, machine_mode mode
,
1373 tree type
, bool named ATTRIBUTE_UNUSED
)
1375 /* Returns the number of bytes at the beginning of an argument that
1376 must be put in registers. The value must be zero for arguments that are
1377 passed entirely in registers or that are entirely pushed on the stack.
1378 Besides, TARGET_FUNCTION_ARG for these arguments should return the
1379 first register to be used by the caller for this argument. */
1380 unsigned int needed_reg_count
;
1381 unsigned int remaining_reg_count
;
1382 CUMULATIVE_ARGS
*cum
;
1384 cum
= get_cumulative_args (ca
);
1386 /* Under hard float abi, we better have argument entirely passed in
1387 registers or pushed on the stack so that we can reduce the complexity
1388 of dealing with cum->gpr_offset and cum->fpr_offset. */
1389 if (TARGET_HARD_FLOAT
)
1392 /* If we have already runned out of argument registers, return zero
1393 so that the argument will be entirely pushed on the stack. */
1394 if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1395 >= NDS32_GPR_ARG_FIRST_REGNUM
+ NDS32_MAX_GPR_REGS_FOR_ARGS
)
1398 /* Calculate how many registers do we need for this argument. */
1399 needed_reg_count
= NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1401 /* Calculate how many argument registers have left for passing argument.
1402 Note that we should count it from next available register number. */
1404 = NDS32_MAX_GPR_REGS_FOR_ARGS
1405 - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1406 - NDS32_GPR_ARG_FIRST_REGNUM
);
1408 /* Note that we have to return the nubmer of bytes, not registers count. */
1409 if (needed_reg_count
> remaining_reg_count
)
1410 return remaining_reg_count
* UNITS_PER_WORD
;
1416 nds32_function_arg_advance (cumulative_args_t ca
, machine_mode mode
,
1417 const_tree type
, bool named
)
1419 machine_mode sub_mode
;
1420 CUMULATIVE_ARGS
*cum
= get_cumulative_args (ca
);
1424 /* We need to further check TYPE and MODE so that we can determine
1425 which kind of register we shall advance. */
1426 if (type
&& TREE_CODE (type
) == COMPLEX_TYPE
)
1427 sub_mode
= TYPE_MODE (TREE_TYPE (type
));
1431 /* Under hard float abi, we may advance FPR registers. */
1432 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (sub_mode
) == MODE_FLOAT
)
1434 /* Currently we have not implemented hard float yet. */
1440 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1441 - NDS32_GPR_ARG_FIRST_REGNUM
1442 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1447 /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
1448 we can advance next register as well so that caller is
1449 able to pass arguments in registers and callee must be
1450 in charge of pushing all of them into stack. */
1451 if (!TARGET_HARD_FLOAT
)
1454 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1455 - NDS32_GPR_ARG_FIRST_REGNUM
1456 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1462 nds32_function_arg_boundary (machine_mode mode
, const_tree type
)
1464 return (nds32_needs_double_word_align (mode
, type
)
1465 ? NDS32_DOUBLE_WORD_ALIGNMENT
1469 /* -- How Scalar Function Values Are Returned. */
1472 nds32_function_value (const_tree ret_type
,
1473 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1474 bool outgoing ATTRIBUTE_UNUSED
)
1479 mode
= TYPE_MODE (ret_type
);
1480 unsignedp
= TYPE_UNSIGNED (ret_type
);
1482 mode
= promote_mode (ret_type
, mode
, &unsignedp
);
1484 return gen_rtx_REG (mode
, NDS32_GPR_RET_FIRST_REGNUM
);
1488 nds32_libcall_value (machine_mode mode
,
1489 const_rtx fun ATTRIBUTE_UNUSED
)
1491 return gen_rtx_REG (mode
, NDS32_GPR_RET_FIRST_REGNUM
);
1495 nds32_function_value_regno_p (const unsigned int regno
)
1497 return (regno
== NDS32_GPR_RET_FIRST_REGNUM
);
1500 /* -- Function Entry and Exit. */
1502 /* The content produced from this function
1503 will be placed before prologue body. */
1505 nds32_asm_function_prologue (FILE *file
)
1508 const char *func_name
;
1512 /* All stack frame information is supposed to be
1513 already computed when expanding prologue.
1514 The result is in cfun->machine.
1515 DO NOT call nds32_compute_stack_frame() here
1516 because it may corrupt the essential information. */
1518 fprintf (file
, "\t! BEGIN PROLOGUE\n");
1519 fprintf (file
, "\t! fp needed: %d\n", frame_pointer_needed
);
1520 fprintf (file
, "\t! pretend_args: %d\n", cfun
->machine
->va_args_size
);
1521 fprintf (file
, "\t! local_size: %d\n", cfun
->machine
->local_size
);
1522 fprintf (file
, "\t! out_args_size: %d\n", cfun
->machine
->out_args_size
);
1524 /* Use df_regs_ever_live_p() to detect if the register
1525 is ever used in the current function. */
1526 fprintf (file
, "\t! registers ever_live: ");
1527 for (r
= 0; r
< 32; r
++)
1529 if (df_regs_ever_live_p (r
))
1530 fprintf (file
, "%s, ", reg_names
[r
]);
1534 /* Display the attributes of this function. */
1535 fprintf (file
, "\t! function attributes: ");
1536 /* Get the attributes tree list.
1537 Note that GCC builds attributes list with reverse order. */
1538 attrs
= DECL_ATTRIBUTES (current_function_decl
);
1540 /* If there is no any attribute, print out "None". */
1542 fprintf (file
, "None");
1544 /* If there are some attributes, try if we need to
1545 construct isr vector information. */
1546 func_name
= IDENTIFIER_POINTER (DECL_NAME (current_function_decl
));
1547 nds32_construct_isr_vectors_information (attrs
, func_name
);
1549 /* Display all attributes of this function. */
1552 name
= TREE_PURPOSE (attrs
);
1553 fprintf (file
, "%s ", IDENTIFIER_POINTER (name
));
1555 /* Pick up the next attribute. */
1556 attrs
= TREE_CHAIN (attrs
);
1561 /* After rtl prologue has been expanded, this function is used. */
1563 nds32_asm_function_end_prologue (FILE *file
)
1565 fprintf (file
, "\t! END PROLOGUE\n");
1567 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1568 we can generate special directive: ".omit_fp_begin"
1569 to guide linker doing fp-as-gp optimization.
1570 However, for a naked function, which means
1571 it should not have prologue/epilogue,
1572 using fp-as-gp still requires saving $fp by push/pop behavior and
1573 there is no benefit to use fp-as-gp on such small function.
1574 So we need to make sure this function is NOT naked as well. */
1575 if (!frame_pointer_needed
1576 && !cfun
->machine
->naked_p
1577 && cfun
->machine
->fp_as_gp_p
)
1579 fprintf (file
, "\t! ----------------------------------------\n");
1580 fprintf (file
, "\t! Guide linker to do "
1581 "link time optimization: fp-as-gp\n");
1582 fprintf (file
, "\t! We add one more instruction to "
1583 "initialize $fp near to $gp location.\n");
1584 fprintf (file
, "\t! If linker fails to use fp-as-gp transformation,\n");
1585 fprintf (file
, "\t! this extra instruction should be "
1586 "eliminated at link stage.\n");
1587 fprintf (file
, "\t.omit_fp_begin\n");
1588 fprintf (file
, "\tla\t$fp,_FP_BASE_\n");
1589 fprintf (file
, "\t! ----------------------------------------\n");
1593 /* Before rtl epilogue has been expanded, this function is used. */
1595 nds32_asm_function_begin_epilogue (FILE *file
)
1597 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1598 we can generate special directive: ".omit_fp_end"
1599 to claim fp-as-gp optimization range.
1600 However, for a naked function,
1601 which means it should not have prologue/epilogue,
1602 using fp-as-gp still requires saving $fp by push/pop behavior and
1603 there is no benefit to use fp-as-gp on such small function.
1604 So we need to make sure this function is NOT naked as well. */
1605 if (!frame_pointer_needed
1606 && !cfun
->machine
->naked_p
1607 && cfun
->machine
->fp_as_gp_p
)
1609 fprintf (file
, "\t! ----------------------------------------\n");
1610 fprintf (file
, "\t! Claim the range of fp-as-gp "
1611 "link time optimization\n");
1612 fprintf (file
, "\t.omit_fp_end\n");
1613 fprintf (file
, "\t! ----------------------------------------\n");
1616 fprintf (file
, "\t! BEGIN EPILOGUE\n");
/* The content produced from this function
   will be placed after epilogue body.  */
static void
nds32_asm_function_epilogue (FILE *file)
{
  fprintf (file, "\t! END EPILOGUE\n");
}
1628 nds32_asm_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
1629 HOST_WIDE_INT delta
,
1630 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
1635 /* Make sure unwind info is emitted for the thunk if needed. */
1636 final_start_function (emit_barrier (), file
, 1);
1638 this_regno
= (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
)
1644 if (satisfies_constraint_Is15 (GEN_INT (delta
)))
1646 fprintf (file
, "\taddi\t$r%d, $r%d, %ld\n",
1647 this_regno
, this_regno
, delta
);
1649 else if (satisfies_constraint_Is20 (GEN_INT (delta
)))
1651 fprintf (file
, "\tmovi\t$ta, %ld\n", delta
);
1652 fprintf (file
, "\tadd\t$r%d, $r%d, $ta\n", this_regno
, this_regno
);
1656 fprintf (file
, "\tsethi\t$ta, hi20(%ld)\n", delta
);
1657 fprintf (file
, "\tori\t$ta, $ta, lo12(%ld)\n", delta
);
1658 fprintf (file
, "\tadd\t$r%d, $r%d, $ta\n", this_regno
, this_regno
);
1662 fprintf (file
, "\tb\t");
1663 assemble_name (file
, XSTR (XEXP (DECL_RTL (function
), 0), 0));
1664 fprintf (file
, "\n");
1666 final_end_function ();
1669 /* -- Permitting tail calls. */
1671 /* Determine whether we need to enable warning for function return check. */
1673 nds32_warn_func_return (tree decl
)
1675 /* Naked functions are implemented entirely in assembly, including the
1676 return sequence, so suppress warnings about this. */
1677 return !nds32_naked_function_p (decl
);
1681 /* Implementing the Varargs Macros. */
1684 nds32_setup_incoming_varargs (cumulative_args_t ca
,
1687 int *pretend_args_size
,
1688 int second_time ATTRIBUTE_UNUSED
)
1690 unsigned int total_args_regs
;
1691 unsigned int num_of_used_regs
;
1692 unsigned int remaining_reg_count
;
1693 CUMULATIVE_ARGS
*cum
;
1695 /* If we are under hard float abi, we do not need to set *pretend_args_size.
1696 So that all nameless arguments are pushed by caller and all situation
1697 can be handled by GCC itself. */
1698 if (TARGET_HARD_FLOAT
)
1701 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
1702 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
1703 However, for nameless(anonymous) arguments, we should push them on the
1704 stack so that all the nameless arguments appear to have been passed
1705 consecutively in the memory for accessing. Hence, we need to check and
1706 exclude the registers that are used for named arguments. */
1708 cum
= get_cumulative_args (ca
);
1710 /* The MODE and TYPE describe the last argument.
1711 We need those information to determine the remaining registers
1714 = NDS32_MAX_GPR_REGS_FOR_ARGS
+ NDS32_GPR_ARG_FIRST_REGNUM
;
1716 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1717 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1719 remaining_reg_count
= total_args_regs
- num_of_used_regs
;
1720 *pretend_args_size
= remaining_reg_count
* UNITS_PER_WORD
;
1726 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1728 /* If this hook returns true, the named argument of FUNCTION_ARG is always
1729 true for named arguments, and false for unnamed arguments. */
1734 /* Trampolines for Nested Functions. */
1737 nds32_asm_trampoline_template (FILE *f
)
1739 if (TARGET_REDUCED_REGS
)
1741 /* Trampoline is not supported on reduced-set registers yet. */
1742 sorry ("a nested function is not supported for reduced registers");
1746 asm_fprintf (f
, "\t! Trampoline code template\n");
1747 asm_fprintf (f
, "\t! This code fragment will be copied "
1748 "into stack on demand\n");
1750 asm_fprintf (f
, "\tmfusr\t$r16,$pc\n");
1751 asm_fprintf (f
, "\tlwi\t$r15,[$r16 + 20] "
1752 "! load nested function address\n");
1753 asm_fprintf (f
, "\tlwi\t$r16,[$r16 + 16] "
1754 "! load chain_value\n");
1755 asm_fprintf (f
, "\tjr\t$r15\n");
1758 /* Preserve space ($pc + 16) for saving chain_value,
1759 nds32_trampoline_init will fill the value in this slot. */
1760 asm_fprintf (f
, "\t! space for saving chain_value\n");
1761 assemble_aligned_integer (UNITS_PER_WORD
, const0_rtx
);
1763 /* Preserve space ($pc + 20) for saving nested function address,
1764 nds32_trampoline_init will fill the value in this slot. */
1765 asm_fprintf (f
, "\t! space for saving nested function address\n");
1766 assemble_aligned_integer (UNITS_PER_WORD
, const0_rtx
);
1769 /* Emit RTL insns to initialize the variable parts of a trampoline. */
1771 nds32_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
1775 /* Nested function address. */
1777 /* The memory rtx that is going to
1778 be filled with chain_value. */
1779 rtx chain_value_mem
;
1780 /* The memory rtx that is going to
1781 be filled with nested function address. */
1782 rtx nested_func_mem
;
1784 /* Start address of trampoline code in stack, for doing cache sync. */
1785 rtx sync_cache_addr
;
1786 /* Temporary register for sync instruction. */
1788 /* Instruction-cache sync instruction,
1789 requesting an argument as starting address. */
1791 /* For convenience reason of doing comparison. */
1792 int tramp_align_in_bytes
;
1794 /* Trampoline is not supported on reduced-set registers yet. */
1795 if (TARGET_REDUCED_REGS
)
1796 sorry ("a nested function is not supported for reduced registers");
1798 /* STEP 1: Copy trampoline code template into stack,
1799 fill up essential data into stack. */
1801 /* Extract nested function address rtx. */
1802 fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
1804 /* m_tramp is memory rtx that is going to be filled with trampoline code.
1805 We have nds32_asm_trampoline_template() to emit template pattern. */
1806 emit_block_move (m_tramp
, assemble_trampoline_template (),
1807 GEN_INT (TRAMPOLINE_SIZE
), BLOCK_OP_NORMAL
);
1809 /* After copying trampoline code into stack,
1810 fill chain_value into stack. */
1811 chain_value_mem
= adjust_address (m_tramp
, SImode
, 16);
1812 emit_move_insn (chain_value_mem
, chain_value
);
1813 /* After copying trampoline code int stack,
1814 fill nested function address into stack. */
1815 nested_func_mem
= adjust_address (m_tramp
, SImode
, 20);
1816 emit_move_insn (nested_func_mem
, fnaddr
);
1818 /* STEP 2: Sync instruction-cache. */
1820 /* We have successfully filled trampoline code into stack.
1821 However, in order to execute code in stack correctly,
1822 we must sync instruction cache. */
1823 sync_cache_addr
= XEXP (m_tramp
, 0);
1824 tmp_reg
= gen_reg_rtx (SImode
);
1825 isync_insn
= gen_unspec_volatile_isync (tmp_reg
);
1827 /* Because nds32_cache_block_size is in bytes,
1828 we get trampoline alignment in bytes for convenient comparison. */
1829 tramp_align_in_bytes
= TRAMPOLINE_ALIGNMENT
/ BITS_PER_UNIT
;
1831 if (tramp_align_in_bytes
>= nds32_cache_block_size
1832 && (tramp_align_in_bytes
% nds32_cache_block_size
) == 0)
1834 /* Under this condition, the starting address of trampoline
1835 must be aligned to the starting address of each cache block
1836 and we do not have to worry about cross-boundary issue. */
1838 i
< (TRAMPOLINE_SIZE
+ nds32_cache_block_size
- 1)
1839 / nds32_cache_block_size
;
1842 emit_move_insn (tmp_reg
,
1843 plus_constant (Pmode
, sync_cache_addr
,
1844 nds32_cache_block_size
* i
));
1845 emit_insn (isync_insn
);
1848 else if (TRAMPOLINE_SIZE
> nds32_cache_block_size
)
1850 /* The starting address of trampoline code
1851 may not be aligned to the cache block,
1852 so the trampoline code may be across two cache block.
1853 We need to sync the last element, which is 4-byte size,
1854 of trampoline template. */
1856 i
< (TRAMPOLINE_SIZE
+ nds32_cache_block_size
- 1)
1857 / nds32_cache_block_size
;
1860 emit_move_insn (tmp_reg
,
1861 plus_constant (Pmode
, sync_cache_addr
,
1862 nds32_cache_block_size
* i
));
1863 emit_insn (isync_insn
);
1866 /* The last element of trampoline template is 4-byte size. */
1867 emit_move_insn (tmp_reg
,
1868 plus_constant (Pmode
, sync_cache_addr
,
1869 TRAMPOLINE_SIZE
- 4));
1870 emit_insn (isync_insn
);
1874 /* This is the simplest case.
1875 Because TRAMPOLINE_SIZE is less than or
1876 equal to nds32_cache_block_size,
1877 we can just sync start address and
1878 the last element of trampoline code. */
1880 /* Sync starting address of tampoline code. */
1881 emit_move_insn (tmp_reg
, sync_cache_addr
);
1882 emit_insn (isync_insn
);
1883 /* Sync the last element, which is 4-byte size,
1884 of trampoline template. */
1885 emit_move_insn (tmp_reg
,
1886 plus_constant (Pmode
, sync_cache_addr
,
1887 TRAMPOLINE_SIZE
- 4));
1888 emit_insn (isync_insn
);
1891 /* Set instruction serialization barrier
1892 to guarantee the correct operations. */
1893 emit_insn (gen_unspec_volatile_isb ());
1897 /* Addressing Modes. */
1900 nds32_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1902 /* For (mem:DI addr) or (mem:DF addr) case,
1903 we only allow 'addr' to be [reg], [symbol_ref],
1904 [const], or [reg + const_int] pattern. */
1905 if (mode
== DImode
|| mode
== DFmode
)
1907 /* Allow [Reg + const_int] addressing mode. */
1908 if (GET_CODE (x
) == PLUS
)
1910 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
)
1911 && nds32_legitimate_index_p (mode
, XEXP (x
, 1), strict
)
1912 && CONST_INT_P (XEXP (x
, 1)))
1914 else if (nds32_address_register_rtx_p (XEXP (x
, 1), strict
)
1915 && nds32_legitimate_index_p (mode
, XEXP (x
, 0), strict
)
1916 && CONST_INT_P (XEXP (x
, 0)))
1920 /* Now check [reg], [symbol_ref], and [const]. */
1921 if (GET_CODE (x
) != REG
1922 && GET_CODE (x
) != SYMBOL_REF
1923 && GET_CODE (x
) != CONST
)
1927 /* Check if 'x' is a valid address. */
1928 switch (GET_CODE (x
))
1931 /* (mem (reg A)) => [Ra] */
1932 return nds32_address_register_rtx_p (x
, strict
);
1935 /* (mem (symbol_ref A)) => [symbol_ref] */
1936 /* If -mcmodel=large, the 'symbol_ref' is not a valid address
1937 during or after LRA/reload phase. */
1938 if (TARGET_CMODEL_LARGE
1939 && (reload_completed
1940 || reload_in_progress
1941 || lra_in_progress
))
1943 /* If -mcmodel=medium and the symbol references to rodata section,
1944 the 'symbol_ref' is not a valid address during or after
1945 LRA/reload phase. */
1946 if (TARGET_CMODEL_MEDIUM
1947 && NDS32_SYMBOL_REF_RODATA_P (x
)
1948 && (reload_completed
1949 || reload_in_progress
1950 || lra_in_progress
))
1956 /* (mem (const (...)))
1957 => [ + const_addr ], where const_addr = symbol_ref + const_int */
1958 if (GET_CODE (XEXP (x
, 0)) == PLUS
)
1960 rtx plus_op
= XEXP (x
, 0);
1962 rtx op0
= XEXP (plus_op
, 0);
1963 rtx op1
= XEXP (plus_op
, 1);
1965 if (GET_CODE (op0
) == SYMBOL_REF
&& CONST_INT_P (op1
))
1967 /* Now we see the [ + const_addr ] pattern, but we need
1968 some further checking. */
1969 /* If -mcmodel=large, the 'const_addr' is not a valid address
1970 during or after LRA/reload phase. */
1971 if (TARGET_CMODEL_LARGE
1972 && (reload_completed
1973 || reload_in_progress
1974 || lra_in_progress
))
1976 /* If -mcmodel=medium and the symbol references to rodata section,
1977 the 'const_addr' is not a valid address during or after
1978 LRA/reload phase. */
1979 if (TARGET_CMODEL_MEDIUM
1980 && NDS32_SYMBOL_REF_RODATA_P (op0
)
1981 && (reload_completed
1982 || reload_in_progress
1983 || lra_in_progress
))
1986 /* At this point we can make sure 'const_addr' is a
1995 /* (mem (post_modify (reg) (plus (reg) (reg))))
1997 /* (mem (post_modify (reg) (plus (reg) (const_int))))
1998 => [Ra], const_int */
1999 if (GET_CODE (XEXP (x
, 0)) == REG
2000 && GET_CODE (XEXP (x
, 1)) == PLUS
)
2002 rtx plus_op
= XEXP (x
, 1);
2004 rtx op0
= XEXP (plus_op
, 0);
2005 rtx op1
= XEXP (plus_op
, 1);
2007 if (nds32_address_register_rtx_p (op0
, strict
)
2008 && nds32_legitimate_index_p (mode
, op1
, strict
))
2018 /* (mem (post_inc reg)) => [Ra], 1/2/4 */
2019 /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
2020 /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2021 We only need to deal with register Ra. */
2022 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
))
2028 /* (mem (plus reg const_int))
2030 /* (mem (plus reg reg))
2032 /* (mem (plus (mult reg const_int) reg))
2033 => [Ra + Rb << sv] */
2034 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
)
2035 && nds32_legitimate_index_p (mode
, XEXP (x
, 1), strict
))
2037 else if (nds32_address_register_rtx_p (XEXP (x
, 1), strict
)
2038 && nds32_legitimate_index_p (mode
, XEXP (x
, 0), strict
))
2044 /* (mem (lo_sum (reg) (symbol_ref))) */
2045 /* (mem (lo_sum (reg) (const))) */
2046 gcc_assert (REG_P (XEXP (x
, 0)));
2047 if (GET_CODE (XEXP (x
, 1)) == SYMBOL_REF
2048 || GET_CODE (XEXP (x
, 1)) == CONST
)
2049 return nds32_legitimate_address_p (mode
, XEXP (x
, 1), strict
);
2059 /* Describing Relative Costs of Operations. */
2062 nds32_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2066 if (from
== HIGH_REGS
|| to
== HIGH_REGS
)
2073 nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2074 reg_class_t rclass ATTRIBUTE_UNUSED
,
2075 bool in ATTRIBUTE_UNUSED
)
2080 /* This target hook describes the relative costs of RTL expressions.
2081 Return 'true' when all subexpressions of x have been processed.
2082 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
2083 Refer to gcc/rtlanal.c for more information. */
2085 nds32_rtx_costs (rtx x
,
2092 return nds32_rtx_costs_impl (x
, mode
, outer_code
, opno
, total
, speed
);
2096 nds32_address_cost (rtx address
,
2101 return nds32_address_cost_impl (address
, mode
, as
, speed
);
2105 /* Dividing the Output into Sections (Texts, Data, . . . ). */
2107 /* If references to a symbol or a constant must be treated differently
2108 depending on something about the variable or function named by the symbol
2109 (such as what section it is in), we use this hook to store flags
2110 in symbol_ref rtx. */
2112 nds32_encode_section_info (tree decl
, rtx rtl
, int new_decl_p
)
2114 default_encode_section_info (decl
, rtl
, new_decl_p
);
2116 /* For the memory rtx, if it references to rodata section, we can store
2117 NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
2118 nds32_legitimate_address_p() can determine how to treat such symbol_ref
2119 based on -mcmodel=X and this information. */
2120 if (MEM_P (rtl
) && MEM_READONLY_P (rtl
))
2122 rtx addr
= XEXP (rtl
, 0);
2124 if (GET_CODE (addr
) == SYMBOL_REF
)
2126 /* For (mem (symbol_ref X)) case. */
2127 SYMBOL_REF_FLAGS (addr
) |= NDS32_SYMBOL_FLAG_RODATA
;
2129 else if (GET_CODE (addr
) == CONST
2130 && GET_CODE (XEXP (addr
, 0)) == PLUS
)
2132 /* For (mem (const (plus (symbol_ref X) (const_int N)))) case. */
2133 rtx plus_op
= XEXP (addr
, 0);
2134 rtx op0
= XEXP (plus_op
, 0);
2135 rtx op1
= XEXP (plus_op
, 1);
2137 if (GET_CODE (op0
) == SYMBOL_REF
&& CONST_INT_P (op1
))
2138 SYMBOL_REF_FLAGS (op0
) |= NDS32_SYMBOL_FLAG_RODATA
;
2144 /* Defining the Output Assembler Language. */
2146 /* -- The Overall Framework of an Assembler File. */
2149 nds32_asm_file_start (void)
2151 default_file_start ();
2153 /* Tell assembler which ABI we are using. */
2154 fprintf (asm_out_file
, "\t! ABI version\n");
2155 fprintf (asm_out_file
, "\t.abi_2\n");
2157 /* Tell assembler that this asm code is generated by compiler. */
2158 fprintf (asm_out_file
, "\t! This asm file is generated by compiler\n");
2159 fprintf (asm_out_file
, "\t.flag\tverbatim\n");
2160 /* Give assembler the size of each vector for interrupt handler. */
2161 fprintf (asm_out_file
, "\t! This vector size directive is required "
2162 "for checking inconsistency on interrupt handler\n");
2163 fprintf (asm_out_file
, "\t.vec_size\t%d\n", nds32_isr_vector_size
);
2165 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2168 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V2");
2170 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V3");
2172 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V3M");
2174 if (TARGET_CMODEL_SMALL
)
2175 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "SMALL");
2176 if (TARGET_CMODEL_MEDIUM
)
2177 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "MEDIUM");
2178 if (TARGET_CMODEL_LARGE
)
2179 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "LARGE");
2181 fprintf (asm_out_file
, "\t! Endian setting\t: %s\n",
2182 ((TARGET_BIG_ENDIAN
) ? "big-endian"
2183 : "little-endian"));
2185 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2187 fprintf (asm_out_file
, "\t! Use conditional move\t\t: %s\n",
2188 ((TARGET_CMOV
) ? "Yes"
2190 fprintf (asm_out_file
, "\t! Use performance extension\t: %s\n",
2191 ((TARGET_PERF_EXT
) ? "Yes"
2194 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2196 fprintf (asm_out_file
, "\t! V3PUSH instructions\t: %s\n",
2197 ((TARGET_V3PUSH
) ? "Yes"
2199 fprintf (asm_out_file
, "\t! 16-bit instructions\t: %s\n",
2200 ((TARGET_16_BIT
) ? "Yes"
2202 fprintf (asm_out_file
, "\t! Reduced registers set\t: %s\n",
2203 ((TARGET_REDUCED_REGS
) ? "Yes"
2206 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2209 fprintf (asm_out_file
, "\t! Optimization level\t: -Os\n");
2211 fprintf (asm_out_file
, "\t! Optimization level\t: -O%d\n", optimize
);
2213 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2215 fprintf (asm_out_file
, "\t! Cache block size\t: %d\n",
2216 nds32_cache_block_size
);
2218 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2220 nds32_asm_file_start_for_isr ();
2224 nds32_asm_file_end (void)
2226 nds32_asm_file_end_for_isr ();
2228 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2231 /* -- Output and Generation of Labels. */
/* Implement TARGET_ASM_GLOBALIZE_LABEL: emit a ".global NAME" directive.  */
static void
nds32_asm_globalize_label (FILE *stream, const char *name)
{
  fputs ("\t.global\t", stream);
  assemble_name (stream, name);
  fputs ("\n", stream);
}
2241 /* -- Output of Assembler Instructions. */
2244 nds32_print_operand (FILE *stream
, rtx x
, int code
)
2251 /* Do nothing special. */
2255 /* 'x' is supposed to be CONST_INT, get the value. */
2256 gcc_assert (CONST_INT_P (x
));
2257 op_value
= INTVAL (x
);
2259 /* According to the Andes architecture,
2260 the system/user register index range is 0 ~ 1023.
2261 In order to avoid conflict between user-specified-integer value
2262 and enum-specified-register value,
2263 the 'enum nds32_intrinsic_registers' value
2264 in nds32_intrinsic.h starts from 1024. */
2265 if (op_value
< 1024 && op_value
>= 0)
2267 /* If user gives integer value directly (0~1023),
2268 we just print out the value. */
2269 fprintf (stream
, "%d", op_value
);
2271 else if (op_value
< 0
2272 || op_value
>= ((int) ARRAY_SIZE (nds32_intrinsic_register_names
)
2275 /* The enum index value for array size is out of range. */
2276 error ("intrinsic register index is out of range");
2280 /* If user applies normal way with __NDS32_REG_XXX__ enum data,
2281 we can print out register name. Remember to substract 1024. */
2282 fprintf (stream
, "%s",
2283 nds32_intrinsic_register_names
[op_value
- 1024]);
2286 /* No need to handle following process, so return immediately. */
2291 output_operand_lossage ("invalid operand output code");
2295 switch (GET_CODE (x
))
2299 output_addr_const (stream
, x
);
2303 /* Forbid using static chain register ($r16)
2304 on reduced-set registers configuration. */
2305 if (TARGET_REDUCED_REGS
2306 && REGNO (x
) == STATIC_CHAIN_REGNUM
)
2307 sorry ("a nested function is not supported for reduced registers");
2309 /* Normal cases, print out register name. */
2310 fputs (reg_names
[REGNO (x
)], stream
);
2314 output_address (GET_MODE (x
), XEXP (x
, 0));
2320 output_addr_const (stream
, x
);
2324 /* Generally, output_addr_const () is able to handle most cases.
2325 We want to see what CODE could appear,
2326 so we use gcc_unreachable() to stop it. */
2334 nds32_print_operand_address (FILE *stream
, machine_mode
/*mode*/, rtx x
)
2338 switch (GET_CODE (x
))
2342 /* [ + symbol_ref] */
2343 /* [ + const_addr], where const_addr = symbol_ref + const_int */
2344 fputs ("[ + ", stream
);
2345 output_addr_const (stream
, x
);
2346 fputs ("]", stream
);
2350 /* Forbid using static chain register ($r16)
2351 on reduced-set registers configuration. */
2352 if (TARGET_REDUCED_REGS
2353 && REGNO (x
) == STATIC_CHAIN_REGNUM
)
2354 sorry ("a nested function is not supported for reduced registers");
2357 fprintf (stream
, "[%s]", reg_names
[REGNO (x
)]);
2364 /* Checking op0, forbid using static chain register ($r16)
2365 on reduced-set registers configuration. */
2366 if (TARGET_REDUCED_REGS
2368 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2369 sorry ("a nested function is not supported for reduced registers");
2370 /* Checking op1, forbid using static chain register ($r16)
2371 on reduced-set registers configuration. */
2372 if (TARGET_REDUCED_REGS
2374 && REGNO (op1
) == STATIC_CHAIN_REGNUM
)
2375 sorry ("a nested function is not supported for reduced registers");
2377 if (REG_P (op0
) && CONST_INT_P (op1
))
2380 fprintf (stream
, "[%s + (%d)]",
2381 reg_names
[REGNO (op0
)], (int)INTVAL (op1
));
2383 else if (REG_P (op0
) && REG_P (op1
))
2386 fprintf (stream
, "[%s + %s]",
2387 reg_names
[REGNO (op0
)], reg_names
[REGNO (op1
)]);
2389 else if (GET_CODE (op0
) == MULT
&& REG_P (op1
))
2392 From observation, the pattern looks like:
2393 (plus:SI (mult:SI (reg:SI 58)
2394 (const_int 4 [0x4]))
2398 /* We need to set sv to output shift value. */
2399 if (INTVAL (XEXP (op0
, 1)) == 1)
2401 else if (INTVAL (XEXP (op0
, 1)) == 2)
2403 else if (INTVAL (XEXP (op0
, 1)) == 4)
2408 fprintf (stream
, "[%s + %s << %d]",
2409 reg_names
[REGNO (op1
)],
2410 reg_names
[REGNO (XEXP (op0
, 0))],
2415 /* The control flow is not supposed to be here. */
2423 /* (post_modify (regA) (plus (regA) (regB)))
2424 (post_modify (regA) (plus (regA) (const_int)))
2425 We would like to extract
2426 regA and regB (or const_int) from plus rtx. */
2427 op0
= XEXP (XEXP (x
, 1), 0);
2428 op1
= XEXP (XEXP (x
, 1), 1);
2430 /* Checking op0, forbid using static chain register ($r16)
2431 on reduced-set registers configuration. */
2432 if (TARGET_REDUCED_REGS
2434 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2435 sorry ("a nested function is not supported for reduced registers");
2436 /* Checking op1, forbid using static chain register ($r16)
2437 on reduced-set registers configuration. */
2438 if (TARGET_REDUCED_REGS
2440 && REGNO (op1
) == STATIC_CHAIN_REGNUM
)
2441 sorry ("a nested function is not supported for reduced registers");
2443 if (REG_P (op0
) && REG_P (op1
))
2446 fprintf (stream
, "[%s], %s",
2447 reg_names
[REGNO (op0
)], reg_names
[REGNO (op1
)]);
2449 else if (REG_P (op0
) && CONST_INT_P (op1
))
2452 fprintf (stream
, "[%s], %d",
2453 reg_names
[REGNO (op0
)], (int)INTVAL (op1
));
2457 /* The control flow is not supposed to be here. */
2468 /* Checking op0, forbid using static chain register ($r16)
2469 on reduced-set registers configuration. */
2470 if (TARGET_REDUCED_REGS
2472 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2473 sorry ("a nested function is not supported for reduced registers");
2477 /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
2478 The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2479 We only need to deal with register Ra. */
2480 fprintf (stream
, "[%s]", reg_names
[REGNO (op0
)]);
2484 /* The control flow is not supposed to be here. */
2492 /* Generally, output_addr_const () is able to handle most cases.
2493 We want to see what CODE could appear,
2494 so we use gcc_unreachable() to stop it. */
2502 /* Defining target-specific uses of __attribute__. */
2504 /* Add some checking after merging attributes. */
2506 nds32_merge_decl_attributes (tree olddecl
, tree newdecl
)
2508 tree combined_attrs
;
2510 /* Create combined attributes. */
2511 combined_attrs
= merge_attributes (DECL_ATTRIBUTES (olddecl
),
2512 DECL_ATTRIBUTES (newdecl
));
2514 /* Since newdecl is acutally a duplicate of olddecl,
2515 we can take olddecl for some operations. */
2516 if (TREE_CODE (olddecl
) == FUNCTION_DECL
)
2518 /* Check isr-specific attributes conflict. */
2519 nds32_check_isr_attrs_conflict (olddecl
, combined_attrs
);
2522 return combined_attrs
;
2525 /* Add some checking when inserting attributes. */
2527 nds32_insert_attributes (tree decl
, tree
*attributes
)
2529 /* For function declaration, we need to check isr-specific attributes:
2530 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
2531 2. Check valid integer value for interrupt/exception.
2532 3. Check valid integer value for reset.
2533 4. Check valid function for nmi/warm. */
2534 if (TREE_CODE (decl
) == FUNCTION_DECL
)
2537 tree intr
, excp
, reset
;
2539 /* Pick up function attributes. */
2540 func_attrs
= *attributes
;
2542 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
2543 nds32_check_isr_attrs_conflict (decl
, func_attrs
);
2545 /* Now we are starting to check valid id value
2546 for interrupt/exception/reset.
2547 Note that we ONLY check its validity here.
2548 To construct isr vector information, it is still performed
2549 by nds32_construct_isr_vectors_information(). */
2550 intr
= lookup_attribute ("interrupt", func_attrs
);
2551 excp
= lookup_attribute ("exception", func_attrs
);
2552 reset
= lookup_attribute ("reset", func_attrs
);
2556 /* Deal with interrupt/exception. */
2558 unsigned int lower_bound
, upper_bound
;
2560 /* The way to handle interrupt or exception is the same,
2561 we just need to take care of actual vector number.
2562 For interrupt(0..63), the actual vector number is (9..72).
2563 For exception(1..8), the actual vector number is (1..8). */
2564 lower_bound
= (intr
) ? (0) : (1);
2565 upper_bound
= (intr
) ? (63) : (8);
2567 /* Prepare id list so that we can traverse id value. */
2568 id_list
= (intr
) ? (TREE_VALUE (intr
)) : (TREE_VALUE (excp
));
2570 /* 2. Check valid integer value for interrupt/exception. */
2575 /* Pick up each vector id value. */
2576 id
= TREE_VALUE (id_list
);
2577 /* Issue error if it is not a valid integer value. */
2578 if (TREE_CODE (id
) != INTEGER_CST
2579 || wi::ltu_p (id
, lower_bound
)
2580 || wi::gtu_p (id
, upper_bound
))
2581 error ("invalid id value for interrupt/exception attribute");
2583 /* Advance to next id. */
2584 id_list
= TREE_CHAIN (id_list
);
2589 /* Deal with reset. */
2593 unsigned int lower_bound
;
2594 unsigned int upper_bound
;
2596 /* Prepare id_list and identify id value so that
2597 we can check if total number of vectors is valid. */
2598 id_list
= TREE_VALUE (reset
);
2599 id
= TREE_VALUE (id_list
);
2601 /* The maximum numbers for user's interrupt is 64. */
2605 /* 3. Check valid integer value for reset. */
2606 if (TREE_CODE (id
) != INTEGER_CST
2607 || wi::ltu_p (id
, lower_bound
)
2608 || wi::gtu_p (id
, upper_bound
))
2609 error ("invalid id value for reset attribute");
2611 /* 4. Check valid function for nmi/warm. */
2612 nmi
= lookup_attribute ("nmi", func_attrs
);
2613 warm
= lookup_attribute ("warm", func_attrs
);
2615 if (nmi
!= NULL_TREE
)
2620 nmi_func_list
= TREE_VALUE (nmi
);
2621 nmi_func
= TREE_VALUE (nmi_func_list
);
2623 /* Issue error if it is not a valid nmi function. */
2624 if (TREE_CODE (nmi_func
) != IDENTIFIER_NODE
)
2625 error ("invalid nmi function for reset attribute");
2628 if (warm
!= NULL_TREE
)
2630 tree warm_func_list
;
2633 warm_func_list
= TREE_VALUE (warm
);
2634 warm_func
= TREE_VALUE (warm_func_list
);
2636 /* Issue error if it is not a valid warm function. */
2637 if (TREE_CODE (warm_func
) != IDENTIFIER_NODE
)
2638 error ("invalid warm function for reset attribute");
2643 /* No interrupt, exception, or reset attribute is set. */
2650 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED
,
2651 tree pop_target ATTRIBUTE_UNUSED
)
2653 /* Currently, we do not parse any pragma target by ourself,
2654 so just simply return false. */
2659 nds32_option_override (void)
2661 /* After all the command options have been parsed,
2662 we shall deal with some flags for changing compiler settings. */
2664 /* At first, we check if we have to strictly
2665 set some flags based on ISA family. */
2668 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2669 target_flags
&= ~MASK_V3PUSH
;
2673 /* Under V3 ISA, currently nothing should be strictly set. */
2677 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2678 target_flags
|= MASK_REDUCED_REGS
;
2679 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
2680 target_flags
&= ~MASK_PERF_EXT
;
2683 /* See if we are using reduced-set registers:
2684 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2685 If so, we must forbid using $r11~$r14, $r16~$r27. */
2686 if (TARGET_REDUCED_REGS
)
2690 /* Prevent register allocator from
2691 choosing it as doing register allocation. */
2692 for (r
= 11; r
<= 14; r
++)
2693 fixed_regs
[r
] = call_used_regs
[r
] = 1;
2694 for (r
= 16; r
<= 27; r
++)
2695 fixed_regs
[r
] = call_used_regs
[r
] = 1;
2700 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
2701 target_flags
&= ~MASK_V3PUSH
;
2704 /* Currently, we don't support PIC code generation yet. */
2706 sorry ("position-independent code not supported");
2710 /* Miscellaneous Parameters. */
/* Implement TARGET_INIT_BUILTINS.  Delegate to the builtin
   implementation file (nds32-intrinsic machinery).  */
static void
nds32_init_builtins (void)
{
  nds32_init_builtins_impl ();
}
2719 nds32_expand_builtin (tree exp
,
2725 return nds32_expand_builtin_impl (exp
, target
, subtarget
, mode
, ignore
);
2729 /* ------------------------------------------------------------------------ */
2731 /* PART 4: Implemet extern function definitions,
2732 the prototype is in nds32-protos.h. */
2734 /* Defining Data Structures for Per-function Information. */
2737 nds32_init_expanders (void)
2739 /* Arrange to initialize and mark the machine per-function status. */
2740 init_machine_status
= nds32_init_machine_status
;
2744 /* Register Usage. */
2746 /* -- How Values Fit in Registers. */
2748 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2751 nds32_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
2753 /* Restrict double-word quantities to even register pairs. */
2754 if (targetm
.hard_regno_nregs (regno
, mode
) == 1
2761 #undef TARGET_HARD_REGNO_MODE_OK
2762 #define TARGET_HARD_REGNO_MODE_OK nds32_hard_regno_mode_ok
2764 /* Implement TARGET_MODES_TIEABLE_P. We can use general registers to
2765 tie QI/HI/SI modes together. */
2768 nds32_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
2770 return (GET_MODE_CLASS (mode1
) == MODE_INT
2771 && GET_MODE_CLASS (mode2
) == MODE_INT
2772 && GET_MODE_SIZE (mode1
) <= UNITS_PER_WORD
2773 && GET_MODE_SIZE (mode2
) <= UNITS_PER_WORD
);
2776 #undef TARGET_MODES_TIEABLE_P
2777 #define TARGET_MODES_TIEABLE_P nds32_modes_tieable_p
2779 /* Register Classes. */
2782 nds32_regno_reg_class (int regno
)
2784 /* Refer to nds32.h for more register class details. */
2786 if (regno
>= 0 && regno
<= 7)
2788 else if (regno
>= 8 && regno
<= 11)
2790 else if (regno
>= 12 && regno
<= 14)
2792 else if (regno
== 15)
2794 else if (regno
>= 16 && regno
<= 19)
2796 else if (regno
>= 20 && regno
<= 31)
2798 else if (regno
== 32 || regno
== 33)
2805 /* Stack Layout and Calling Conventions. */
2807 /* -- Basic Stack Layout. */
2810 nds32_return_addr_rtx (int count
,
2811 rtx frameaddr ATTRIBUTE_UNUSED
)
2813 /* There is no way to determine the return address
2814 if frameaddr is the frame that has 'count' steps
2815 up from current frame. */
2819 /* If count == 0, it means we are at current frame,
2820 the return address is $r30 ($lp). */
2821 return get_hard_reg_initial_val (Pmode
, LP_REGNUM
);
2824 /* -- Eliminating Frame Pointer and Arg Pointer. */
2827 nds32_initial_elimination_offset (unsigned int from_reg
, unsigned int to_reg
)
2829 HOST_WIDE_INT offset
;
2831 /* Compute and setup stack frame size.
2832 The result will be in cfun->machine. */
2833 nds32_compute_stack_frame ();
2835 /* Remember to consider
2836 cfun->machine->callee_saved_area_gpr_padding_bytes
2837 when calculating offset. */
2838 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
2840 offset
= (cfun
->machine
->fp_size
2841 + cfun
->machine
->gp_size
2842 + cfun
->machine
->lp_size
2843 + cfun
->machine
->callee_saved_gpr_regs_size
2844 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
2845 + cfun
->machine
->local_size
2846 + cfun
->machine
->out_args_size
);
2848 else if (from_reg
== ARG_POINTER_REGNUM
2849 && to_reg
== HARD_FRAME_POINTER_REGNUM
)
2853 else if (from_reg
== FRAME_POINTER_REGNUM
2854 && to_reg
== STACK_POINTER_REGNUM
)
2856 offset
= (cfun
->machine
->local_size
+ cfun
->machine
->out_args_size
);
2858 else if (from_reg
== FRAME_POINTER_REGNUM
2859 && to_reg
== HARD_FRAME_POINTER_REGNUM
)
2861 offset
= (-1) * (cfun
->machine
->fp_size
2862 + cfun
->machine
->gp_size
2863 + cfun
->machine
->lp_size
2864 + cfun
->machine
->callee_saved_gpr_regs_size
2865 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
);
2875 /* -- Passing Arguments in Registers. */
2878 nds32_init_cumulative_args (CUMULATIVE_ARGS
*cum
,
2879 tree fntype ATTRIBUTE_UNUSED
,
2880 rtx libname ATTRIBUTE_UNUSED
,
2881 tree fndecl ATTRIBUTE_UNUSED
,
2882 int n_named_args ATTRIBUTE_UNUSED
)
2884 /* Initial available registers
2885 (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
2886 for passing arguments. */
2887 cum
->gpr_offset
= 0;
2890 /* -- Function Entry and Exit. */
2892 /* Function for normal multiple push prologue. */
2894 nds32_expand_prologue (void)
2901 rtx fp_adjust_insn
, sp_adjust_insn
;
2903 /* Compute and setup stack frame size.
2904 The result will be in cfun->machine. */
2905 nds32_compute_stack_frame ();
2907 /* If this is a variadic function, first we need to push argument
2908 registers that hold the unnamed argument value. */
2909 if (cfun
->machine
->va_args_size
!= 0)
2911 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->va_args_first_regno
);
2912 Re
= gen_rtx_REG (SImode
, cfun
->machine
->va_args_last_regno
);
2913 /* No need to push $fp, $gp, or $lp, so use GEN_INT(0). */
2914 nds32_emit_stack_push_multiple (Rb
, Re
, GEN_INT (0), true);
2916 /* We may also need to adjust stack pointer for padding bytes
2917 because varargs may cause $sp not 8-byte aligned. */
2918 if (cfun
->machine
->va_args_area_padding_bytes
)
2920 /* Generate sp adjustment instruction. */
2921 sp_adjust
= cfun
->machine
->va_args_area_padding_bytes
;
2922 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
2924 GEN_INT (-1 * sp_adjust
));
2926 /* Emit rtx into instructions list and receive INSN rtx form. */
2927 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
2929 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2930 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2931 generate CFI (Call Frame Information) stuff. */
2932 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
2936 /* If the function is 'naked',
2937 we do not have to generate prologue code fragment. */
2938 if (cfun
->machine
->naked_p
)
2941 /* Get callee_first_regno and callee_last_regno. */
2942 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
2943 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
2945 /* nds32_emit_stack_push_multiple(first_regno, last_regno),
2946 the pattern 'stack_push_multiple' is implemented in nds32.md.
2947 For En4 field, we have to calculate its constant value.
2948 Refer to Andes ISA for more information. */
2950 if (cfun
->machine
->fp_size
)
2952 if (cfun
->machine
->gp_size
)
2954 if (cfun
->machine
->lp_size
)
2957 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2958 to be saved, we don't have to create multiple push instruction.
2959 Otherwise, a multiple push instruction is needed. */
2960 if (!(REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
&& en4_const
== 0))
2962 /* Create multiple push instruction rtx. */
2963 nds32_emit_stack_push_multiple (Rb
, Re
, GEN_INT (en4_const
), false);
2966 /* Check frame_pointer_needed to see
2967 if we shall emit fp adjustment instruction. */
2968 if (frame_pointer_needed
)
2970 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2971 + (4 * callee-saved-registers)
2972 Note: No need to adjust
2973 cfun->machine->callee_saved_area_gpr_padding_bytes,
2974 because, at this point, stack pointer is just
2975 at the position after push instruction. */
2976 fp_adjust
= cfun
->machine
->fp_size
2977 + cfun
->machine
->gp_size
2978 + cfun
->machine
->lp_size
2979 + cfun
->machine
->callee_saved_gpr_regs_size
;
2980 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
2982 GEN_INT (fp_adjust
));
2983 /* Emit rtx into instructions list and receive INSN rtx form. */
2984 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
2986 /* The insn rtx 'fp_adjust_insn' will change frame layout. */
2987 RTX_FRAME_RELATED_P (fp_adjust_insn
) = 1;
2990 /* Adjust $sp = $sp - local_size - out_args_size
2991 - callee_saved_area_gpr_padding_bytes. */
2992 sp_adjust
= cfun
->machine
->local_size
2993 + cfun
->machine
->out_args_size
2994 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
2995 /* sp_adjust value may be out of range of the addi instruction,
2996 create alternative add behavior with TA_REGNUM if necessary,
2997 using NEGATIVE value to tell that we are decreasing address. */
2998 sp_adjust
= nds32_force_addi_stack_int ( (-1) * sp_adjust
);
3001 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
3002 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3004 GEN_INT (-1 * sp_adjust
));
3005 /* Emit rtx into instructions list and receive INSN rtx form. */
3006 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3008 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3009 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3010 generate CFI (Call Frame Information) stuff. */
3011 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3014 /* Prevent the instruction scheduler from
3015 moving instructions across the boundary. */
3016 emit_insn (gen_blockage ());
3019 /* Function for normal multiple pop epilogue. */
3021 nds32_expand_epilogue (bool sibcall_p
)
3029 /* Compute and setup stack frame size.
3030 The result will be in cfun->machine. */
3031 nds32_compute_stack_frame ();
3033 /* Prevent the instruction scheduler from
3034 moving instructions across the boundary. */
3035 emit_insn (gen_blockage ());
3037 /* If the function is 'naked', we do not have to generate
3038 epilogue code fragment BUT 'ret' instruction.
3039 However, if this function is also a variadic function,
3040 we need to create adjust stack pointer before 'ret' instruction. */
3041 if (cfun
->machine
->naked_p
)
3043 /* If this is a variadic function, we do not have to restore argument
3044 registers but need to adjust stack pointer back to previous stack
3045 frame location before return. */
3046 if (cfun
->machine
->va_args_size
!= 0)
3048 /* Generate sp adjustment instruction.
3049 We need to consider padding bytes here. */
3050 sp_adjust
= cfun
->machine
->va_args_size
3051 + cfun
->machine
->va_args_area_padding_bytes
;
3052 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3054 GEN_INT (sp_adjust
));
3055 /* Emit rtx into instructions list and receive INSN rtx form. */
3056 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3058 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3059 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3060 generate CFI (Call Frame Information) stuff. */
3061 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3064 /* Generate return instruction by using 'return_internal' pattern.
3065 Make sure this instruction is after gen_blockage(). */
3067 emit_jump_insn (gen_return_internal ());
3071 if (frame_pointer_needed
)
3073 /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
3074 - (4 * callee-saved-registers)
3075 Note: No need to adjust
3076 cfun->machine->callee_saved_area_gpr_padding_bytes,
3077 because we want to adjust stack pointer
3078 to the position for pop instruction. */
3079 sp_adjust
= cfun
->machine
->fp_size
3080 + cfun
->machine
->gp_size
3081 + cfun
->machine
->lp_size
3082 + cfun
->machine
->callee_saved_gpr_regs_size
;
3083 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3084 hard_frame_pointer_rtx
,
3085 GEN_INT (-1 * sp_adjust
));
3086 /* Emit rtx into instructions list and receive INSN rtx form. */
3087 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3089 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3090 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3094 /* If frame pointer is NOT needed,
3095 we cannot calculate the sp adjustment from frame pointer.
3096 Instead, we calculate the adjustment by local_size,
3097 out_args_size, and callee_saved_area_padding_bytes.
3098 Notice that such sp adjustment value may be out of range,
3099 so we have to deal with it as well. */
3101 /* Adjust $sp = $sp + local_size + out_args_size
3102 + callee_saved_area_padding_bytes. */
3103 sp_adjust
= cfun
->machine
->local_size
3104 + cfun
->machine
->out_args_size
3105 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3106 /* sp_adjust value may be out of range of the addi instruction,
3107 create alternative add behavior with TA_REGNUM if necessary,
3108 using POSITIVE value to tell that we are increasing address. */
3109 sp_adjust
= nds32_force_addi_stack_int (sp_adjust
);
3112 /* Generate sp adjustment instruction
3113 if and only if sp_adjust != 0. */
3114 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3116 GEN_INT (sp_adjust
));
3117 /* Emit rtx into instructions list and receive INSN rtx form. */
3118 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3120 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3121 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3125 /* Get callee_first_regno and callee_last_regno. */
3126 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3127 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3129 /* nds32_emit_stack_pop_multiple(first_regno, last_regno),
3130 the pattern 'stack_pop_multiple' is implementad in nds32.md.
3131 For En4 field, we have to calculate its constant value.
3132 Refer to Andes ISA for more information. */
3134 if (cfun
->machine
->fp_size
)
3136 if (cfun
->machine
->gp_size
)
3138 if (cfun
->machine
->lp_size
)
3141 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
3142 to be saved, we don't have to create multiple pop instruction.
3143 Otherwise, a multiple pop instruction is needed. */
3144 if (!(REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
&& en4_const
== 0))
3146 /* Create multiple pop instruction rtx. */
3147 nds32_emit_stack_pop_multiple (Rb
, Re
, GEN_INT (en4_const
));
3150 /* If this is a variadic function, we do not have to restore argument
3151 registers but need to adjust stack pointer back to previous stack
3152 frame location before return. */
3153 if (cfun
->machine
->va_args_size
!= 0)
3155 /* Generate sp adjustment instruction.
3156 We need to consider padding bytes here. */
3157 sp_adjust
= cfun
->machine
->va_args_size
3158 + cfun
->machine
->va_args_area_padding_bytes
;
3159 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3161 GEN_INT (sp_adjust
));
3162 /* Emit rtx into instructions list and receive INSN rtx form. */
3163 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3165 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3166 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3167 generate CFI (Call Frame Information) stuff. */
3168 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3171 /* Generate return instruction. */
3173 emit_jump_insn (gen_return_internal ());
3176 /* Function for v3push prologue. */
3178 nds32_expand_prologue_v3push (void)
3184 rtx fp_adjust_insn
, sp_adjust_insn
;
3186 /* Compute and setup stack frame size.
3187 The result will be in cfun->machine. */
3188 nds32_compute_stack_frame ();
3190 /* If the function is 'naked',
3191 we do not have to generate prologue code fragment. */
3192 if (cfun
->machine
->naked_p
)
3195 /* Get callee_first_regno and callee_last_regno. */
3196 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3197 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3199 /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
3200 where imm8u has to be 8-byte alignment. */
3201 sp_adjust
= cfun
->machine
->local_size
3202 + cfun
->machine
->out_args_size
3203 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3205 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust
))
3206 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust
))
3208 /* We can use 'push25 Re,imm8u'. */
3210 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3211 the pattern 'stack_v3push' is implemented in nds32.md.
3212 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3213 nds32_emit_stack_v3push (Rb
, Re
,
3214 GEN_INT (14), GEN_INT (sp_adjust
));
3216 /* Check frame_pointer_needed to see
3217 if we shall emit fp adjustment instruction. */
3218 if (frame_pointer_needed
)
3220 /* adjust $fp = $sp + 4 ($fp size)
3223 + (4 * n) (callee-saved registers)
3224 + sp_adjust ('push25 Re,imm8u')
3225 Note: Since we use 'push25 Re,imm8u',
3226 the position of stack pointer is further
3227 changed after push instruction.
3228 Hence, we need to take sp_adjust value
3229 into consideration. */
3230 fp_adjust
= cfun
->machine
->fp_size
3231 + cfun
->machine
->gp_size
3232 + cfun
->machine
->lp_size
3233 + cfun
->machine
->callee_saved_gpr_regs_size
3235 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
3237 GEN_INT (fp_adjust
));
3238 /* Emit rtx into instructions list and receive INSN rtx form. */
3239 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
3244 /* We have to use 'push25 Re,0' and
3245 expand one more instruction to adjust $sp later. */
3247 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3248 the pattern 'stack_v3push' is implemented in nds32.md.
3249 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3250 nds32_emit_stack_v3push (Rb
, Re
,
3251 GEN_INT (14), GEN_INT (0));
3253 /* Check frame_pointer_needed to see
3254 if we shall emit fp adjustment instruction. */
3255 if (frame_pointer_needed
)
3257 /* adjust $fp = $sp + 4 ($fp size)
3260 + (4 * n) (callee-saved registers)
3261 Note: Since we use 'push25 Re,0',
3262 the stack pointer is just at the position
3263 after push instruction.
3264 No need to take sp_adjust into consideration. */
3265 fp_adjust
= cfun
->machine
->fp_size
3266 + cfun
->machine
->gp_size
3267 + cfun
->machine
->lp_size
3268 + cfun
->machine
->callee_saved_gpr_regs_size
;
3269 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
3271 GEN_INT (fp_adjust
));
3272 /* Emit rtx into instructions list and receive INSN rtx form. */
3273 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
3276 /* Because we use 'push25 Re,0',
3277 we need to expand one more instruction to adjust $sp.
3278 However, sp_adjust value may be out of range of the addi instruction,
3279 create alternative add behavior with TA_REGNUM if necessary,
3280 using NEGATIVE value to tell that we are decreasing address. */
3281 sp_adjust
= nds32_force_addi_stack_int ( (-1) * sp_adjust
);
3284 /* Generate sp adjustment instruction
3285 if and only if sp_adjust != 0. */
3286 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3288 GEN_INT (-1 * sp_adjust
));
3289 /* Emit rtx into instructions list and receive INSN rtx form. */
3290 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3292 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3293 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3294 generate CFI (Call Frame Information) stuff. */
3295 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3299 /* Prevent the instruction scheduler from
3300 moving instructions across the boundary. */
3301 emit_insn (gen_blockage ());
3304 /* Function for v3pop epilogue. */
3306 nds32_expand_epilogue_v3pop (bool sibcall_p
)
3313 /* Compute and setup stack frame size.
3314 The result will be in cfun->machine. */
3315 nds32_compute_stack_frame ();
3317 /* Prevent the instruction scheduler from
3318 moving instructions across the boundary. */
3319 emit_insn (gen_blockage ());
3321 /* If the function is 'naked', we do not have to generate
3322 epilogue code fragment BUT 'ret' instruction. */
3323 if (cfun
->machine
->naked_p
)
3325 /* Generate return instruction by using 'return_internal' pattern.
3326 Make sure this instruction is after gen_blockage(). */
3328 emit_jump_insn (gen_return_internal ());
3332 /* Get callee_first_regno and callee_last_regno. */
3333 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3334 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3336 /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
3337 where imm8u has to be 8-byte alignment. */
3338 sp_adjust
= cfun
->machine
->local_size
3339 + cfun
->machine
->out_args_size
3340 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3342 /* We have to consider alloca issue as well.
3343 If the function does call alloca(), the stack pointer is not fixed.
3344 In that case, we cannot use 'pop25 Re,imm8u' directly.
3345 We have to caculate stack pointer from frame pointer
3346 and then use 'pop25 Re,0'.
3347 Of course, the frame_pointer_needed should be nonzero
3348 if the function calls alloca(). */
3349 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust
))
3350 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust
)
3351 && !cfun
->calls_alloca
)
3353 /* We can use 'pop25 Re,imm8u'. */
3355 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3356 the pattern 'stack_v3pop' is implementad in nds32.md.
3357 The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3358 nds32_emit_stack_v3pop (Rb
, Re
,
3359 GEN_INT (14), GEN_INT (sp_adjust
));
3363 /* We have to use 'pop25 Re,0', and prior to it,
3364 we must expand one more instruction to adjust $sp. */
3366 if (frame_pointer_needed
)
3368 /* adjust $sp = $fp - 4 ($fp size)
3371 - (4 * n) (callee-saved registers)
3372 Note: No need to adjust
3373 cfun->machine->callee_saved_area_gpr_padding_bytes,
3374 because we want to adjust stack pointer
3375 to the position for pop instruction. */
3376 sp_adjust
= cfun
->machine
->fp_size
3377 + cfun
->machine
->gp_size
3378 + cfun
->machine
->lp_size
3379 + cfun
->machine
->callee_saved_gpr_regs_size
;
3380 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3381 hard_frame_pointer_rtx
,
3382 GEN_INT (-1 * sp_adjust
));
3383 /* Emit rtx into instructions list and receive INSN rtx form. */
3384 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3388 /* If frame pointer is NOT needed,
3389 we cannot calculate the sp adjustment from frame pointer.
3390 Instead, we calculate the adjustment by local_size,
3391 out_args_size, and callee_saved_area_padding_bytes.
3392 Notice that such sp adjustment value may be out of range,
3393 so we have to deal with it as well. */
3395 /* Adjust $sp = $sp + local_size + out_args_size
3396 + callee_saved_area_gpr_padding_bytes. */
3397 sp_adjust
= cfun
->machine
->local_size
3398 + cfun
->machine
->out_args_size
3399 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3400 /* sp_adjust value may be out of range of the addi instruction,
3401 create alternative add behavior with TA_REGNUM if necessary,
3402 using POSITIVE value to tell that we are increasing address. */
3403 sp_adjust
= nds32_force_addi_stack_int (sp_adjust
);
3406 /* Generate sp adjustment instruction
3407 if and only if sp_adjust != 0. */
3408 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3410 GEN_INT (sp_adjust
));
3411 /* Emit rtx into instructions list and receive INSN rtx form. */
3412 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3416 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3417 the pattern 'stack_v3pop' is implementad in nds32.md. */
3418 /* The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3419 nds32_emit_stack_v3pop (Rb
, Re
,
3420 GEN_INT (14), GEN_INT (0));
3423 /* Generate return instruction. */
3424 emit_jump_insn (gen_pop25return ());
3427 /* Return nonzero if this function is known to have a null epilogue.
3428 This allows the optimizer to omit jumps to jumps if no stack
3431 nds32_can_use_return_insn (void)
3433 /* Prior to reloading, we can't tell how many registers must be saved.
3434 Thus we can not determine whether this function has null epilogue. */
3435 if (!reload_completed
)
3438 /* If no stack was created, two conditions must be satisfied:
3439 1. This is a naked function.
3440 So there is no callee-saved, local size, or outgoing size.
3441 2. This is NOT a variadic function.
3442 So there is no pushing arguement registers into the stack. */
3443 return (cfun
->machine
->naked_p
&& (cfun
->machine
->va_args_size
== 0));
3446 /* ------------------------------------------------------------------------ */
3448 /* Function to test 333-form for load/store instructions.
3449 This is auxiliary extern function for auxiliary macro in nds32.h.
3450 Because it is a little complicated, we use function instead of macro. */
3452 nds32_ls_333_p (rtx rt
, rtx ra
, rtx imm
, machine_mode mode
)
3454 if (REGNO_REG_CLASS (REGNO (rt
)) == LOW_REGS
3455 && REGNO_REG_CLASS (REGNO (ra
)) == LOW_REGS
)
3457 if (GET_MODE_SIZE (mode
) == 4)
3458 return satisfies_constraint_Iu05 (imm
);
3460 if (GET_MODE_SIZE (mode
) == 2)
3461 return satisfies_constraint_Iu04 (imm
);
3463 if (GET_MODE_SIZE (mode
) == 1)
3464 return satisfies_constraint_Iu03 (imm
);
3471 /* Computing the Length of an Insn.
3472 Modifies the length assigned to instruction INSN.
3473 LEN is the initially computed length of the insn. */
3475 nds32_adjust_insn_length (rtx_insn
*insn
, int length
)
3479 switch (recog_memoized (insn
))
3481 case CODE_FOR_move_df
:
3482 case CODE_FOR_move_di
:
3483 /* Adjust length of movd44 to 2. */
3484 src
= XEXP (PATTERN (insn
), 1);
3485 dst
= XEXP (PATTERN (insn
), 0);
3489 && (REGNO (src
) % 2) == 0
3490 && (REGNO (dst
) % 2) == 0)
3502 /* Return align 2 (log base 2) if the next instruction of LABEL is 4 byte. */
3504 nds32_target_alignment (rtx_insn
*label
)
3511 insn
= next_active_insn (label
);
3515 else if ((get_attr_length (insn
) % 4) == 0)
/* ------------------------------------------------------------------------ */

/* PART 5: Initialize target hook structure and definitions.  */

/* Controlling the Compilation Driver.  */


/* Run-time Target Specification.  */


/* Defining Data Structures for Per-function Information.  */


/* Storage Layout.  */

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE \
  default_promote_function_mode_always_promote


/* Layout of Source Language Data Types.  */


/* Register Usage.  */

/* -- Basic Characteristics of Registers.  */

/* -- Order of Allocation of Registers.  */

/* -- How Values Fit in Registers.  */

/* -- Handling Leaf Functions.  */

/* -- Registers That Form a Stack.  */


/* Register Classes.  */

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs

#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY nds32_register_priority


/* Obsolete Macros for Defining Constraints.  */


/* Stack Layout and Calling Conventions.  */

/* -- Basic Stack Layout.  */

/* -- Exception Handling Support.  */

/* -- Specifying How Stack Checking is Done.  */

/* -- Registers That Address the Stack Frame.  */

/* -- Eliminating Frame Pointer and Arg Pointer.  */

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE nds32_can_eliminate

/* -- Passing Function Arguments on the Stack.  */

/* -- Passing Arguments in Registers.  */

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG nds32_function_arg

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack

#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance

#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary

/* -- How Scalar Function Values Are Returned.  */

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE nds32_function_value

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE nds32_libcall_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p

/* -- How Large Values Are Returned.  */

/* -- Caller-Saves Register Allocation.  */

/* -- Function Entry and Exit.  */

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue

#undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
#define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* -- Generating Code for Profiling.  */

/* -- Permitting tail calls.  */

#undef TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN nds32_warn_func_return

/* Stack smashing protection.  */


/* Implementing the Varargs Macros.  */

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs

#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming


/* Trampolines for Nested Functions.  */

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT nds32_trampoline_init


/* Implicit Calls to Library Routines.  */


/* Addressing Modes.  */

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p


/* Anchored Addresses.  */


/* Condition Code Status.  */

/* -- Representation of condition codes using (cc0).  */

/* -- Representation of condition codes using registers.  */

/* -- Macros to control conditional execution.  */


/* Describing Relative Costs of Operations.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST nds32_register_move_cost

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS nds32_rtx_costs

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST nds32_address_cost


/* Adjusting the Instruction Scheduler.  */


/* Dividing the Output into Sections (Texts, Data, . . . ).  */

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info


/* Position Independent Code.  */


/* Defining the Output Assembler Language.  */

/* -- The Overall Framework of an Assembler File.  */

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START nds32_asm_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END nds32_asm_file_end

/* -- Output of Data.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

/* -- Output of Uninitialized Variables.  */

/* -- Output and Generation of Labels.  */

#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label

/* -- How Initialization Functions Are Handled.  */

/* -- Macros Controlling Initialization Routines.  */

/* -- Output of Assembler Instructions.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND nds32_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address

/* -- Output of Dispatch Tables.  */

/* -- Assembler Commands for Exception Regions.  */

/* -- Assembler Commands for Alignment.  */


/* Controlling Debugging Information Format.  */

/* -- Macros Affecting All Debugging Formats.  */

/* -- Specific Options for DBX Output.  */

/* -- Open-Ended Hooks for DBX Format.  */

/* -- File Names in DBX Format.  */

/* -- Macros for SDB and DWARF Output.  */

/* -- Macros for VMS Debug Format.  */


/* Cross Compilation and Floating Point.  */


/* Mode Switching Instructions.  */


/* Defining target-specific uses of __attribute__.  */

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE nds32_attribute_table

#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes

#undef TARGET_OPTION_PRAGMA_PARSE
#define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE nds32_option_override


/* Emulating TLS.  */


/* Defining coprocessor specifics for MIPS targets.  */


/* Parameters for Precompiled Header Validity Checking.  */


/* C++ ABI parameters.  */


/* Adding support for named address spaces.  */


/* Miscellaneous Parameters.  */

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS nds32_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN nds32_expand_builtin
3821 /* Initialize the GCC target structure. */
3823 struct gcc_target targetm
= TARGET_INITIALIZER
;
3825 /* ------------------------------------------------------------------------ */