1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* ------------------------------------------------------------------------ */
25 #include "coretypes.h"
30 #include "stringpool.h"
35 #include "optabs.h" /* For GEN_FCN. */
39 #include "diagnostic-core.h"
40 #include "stor-layout.h"
46 #include "tm-constrs.h"
49 /* This file should be included last. */
50 #include "target-def.h"
52 /* ------------------------------------------------------------------------ */
54 /* This file is divided into five parts:
56 PART 1: Auxiliary static variable definitions and
57 target hook static variable definitions.
59 PART 2: Auxiliary static function definitions.
61 PART 3: Implement target hook stuff definitions.
63 PART 4: Implement extern function definitions,
64 the prototype is in nds32-protos.h.
66 PART 5: Initialize target hook structure and definitions. */
68 /* ------------------------------------------------------------------------ */
70 /* PART 1: Auxiliary static variable definitions and
71 target hook static variable definitions. */
73 /* Define intrinsic register names.
74 Please refer to nds32_intrinsic.h file, the index is corresponding to
75 'enum nds32_intrinsic_registers' data type values.
76 NOTE that the base value starting from 1024. */
/* Names of the intrinsic registers exposed to builtins; the index order
   corresponds to 'enum nds32_intrinsic_registers' (base value 1024) in
   nds32_intrinsic.h.
   NOTE(review): the '= {' and closing '};' of this array initializer appear
   to be missing from this extraction — restore from the original file.  */
77 static const char * const nds32_intrinsic_register_names
[] =
79 "$PSW", "$IPSW", "$ITYPE", "$IPC"
82 /* Defining target-specific uses of __attribute__. */
/* Machine-specific __attribute__ table for the nds32 target; consumed via
   TARGET_ATTRIBUTE_TABLE.  Each entry follows the attribute_spec layout
   documented in the syntax comment below.
   NOTE(review): the '= {' and closing '};' of this array initializer appear
   to be missing from this extraction — restore from the original file.  */
83 static const struct attribute_spec nds32_attribute_table
[] =
85 /* Syntax: { name, min_len, max_len, decl_required, type_required,
86 function_type_required, handler, affects_type_identity } */
88 /* The interrupt vid: [0-63]+ (actual vector number starts from 9 to 72). */
89 { "interrupt", 1, 64, false, false, false, NULL
, false },
90 /* The exception vid: [1-8]+ (actual vector number starts from 1 to 8). */
91 { "exception", 1, 8, false, false, false, NULL
, false },
92 /* Argument is user's interrupt numbers. The vector number is always 0. */
93 { "reset", 1, 1, false, false, false, NULL
, false },
95 /* The attributes describing isr nested type. */
96 { "nested", 0, 0, false, false, false, NULL
, false },
97 { "not_nested", 0, 0, false, false, false, NULL
, false },
98 { "nested_ready", 0, 0, false, false, false, NULL
, false },
100 /* The attributes describing isr register save scheme. */
101 { "save_all", 0, 0, false, false, false, NULL
, false },
102 { "partial_save", 0, 0, false, false, false, NULL
, false },
104 /* The attributes used by reset attribute. */
105 { "nmi", 1, 1, false, false, false, NULL
, false },
106 { "warm", 1, 1, false, false, false, NULL
, false },
108 /* The attribute telling no prologue/epilogue. */
109 { "naked", 0, 0, false, false, false, NULL
, false },
111 /* The last attribute spec is set to be NULL (terminator entry). */
112 { NULL
, 0, 0, false, false, false, NULL
, false }
116 /* ------------------------------------------------------------------------ */
118 /* PART 2: Auxiliary static function definitions. */
120 /* Function to save and restore machine-specific function data. */
/* Allocate and zero-initialize the per-function machine_function record
   (installed as init_machine_status).  Explicitly clears the naked_p and
   fp_as_gp_p flags; all other fields are zeroed by ggc_cleared_alloc.
   NOTE(review): the opening '{', 'return machine;' and closing '}' appear
   to be missing from this extraction — restore from the original file.  */
121 static struct machine_function
*
122 nds32_init_machine_status (void)
124 struct machine_function
*machine
;
125 machine
= ggc_cleared_alloc
<machine_function
> ();
127 /* Initially assume this function needs prologue/epilogue. */
128 machine
->naked_p
= 0;
130 /* Initially assume this function does NOT use fp_as_gp optimization. */
131 machine
->fp_as_gp_p
= 0;
136 /* Function to compute stack frame size and
137 store into cfun->machine structure. */
/* Compute the stack frame layout for the current function and store the
   results in cfun->machine: varargs pretend-args area, local/outgoing
   sizes, $fp/$gp/$lp save sizes, callee-saved GPR range, double-word
   padding, the 'naked_p' decision, and the v3push (push25/pop25)
   register-range adjustment.
   NOTE(review): this extraction is missing several original lines — the
   return type line, declarations of 'r' and 'block_size', braces, the
   TARGET_V3PUSH condition, and some closing parentheses.  Restore from
   the original file before compiling.  */
139 nds32_compute_stack_frame (void)
144 /* Because nds32_compute_stack_frame() will be called from different place,
145 every time we enter this function, we have to assume this function
146 needs prologue/epilogue. */
147 cfun
->machine
->naked_p
= 0;
149 /* Get variadic arguments size to prepare pretend arguments and
150 we will push them into stack at prologue by ourself. */
151 cfun
->machine
->va_args_size
= crtl
->args
.pretend_args_size
;
152 if (cfun
->machine
->va_args_size
!= 0)
154 cfun
->machine
->va_args_first_regno
155 = NDS32_GPR_ARG_FIRST_REGNUM
156 + NDS32_MAX_GPR_REGS_FOR_ARGS
157 - (crtl
->args
.pretend_args_size
/ UNITS_PER_WORD
);
158 cfun
->machine
->va_args_last_regno
159 = NDS32_GPR_ARG_FIRST_REGNUM
+ NDS32_MAX_GPR_REGS_FOR_ARGS
- 1;
/* Else branch: no varargs — mark the range empty with SP_REGNUM sentinels.  */
163 cfun
->machine
->va_args_first_regno
= SP_REGNUM
;
164 cfun
->machine
->va_args_last_regno
= SP_REGNUM
;
167 /* Important: We need to make sure that varargs area is 8-byte alignment. */
168 block_size
= cfun
->machine
->va_args_size
;
169 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size
))
171 cfun
->machine
->va_args_area_padding_bytes
172 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
) - block_size
;
175 /* Get local variables, incoming variables, and temporary variables size.
176 Note that we need to make sure it is 8-byte alignment because
177 there may be no padding bytes if we are using LRA. */
178 cfun
->machine
->local_size
= NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
180 /* Get outgoing arguments size. */
181 cfun
->machine
->out_args_size
= crtl
->outgoing_args_size
;
183 /* If $fp value is required to be saved on stack, it needs 4 bytes space.
184 Check whether $fp is ever live. */
185 cfun
->machine
->fp_size
= (df_regs_ever_live_p (FP_REGNUM
)) ? 4 : 0;
187 /* If $gp value is required to be saved on stack, it needs 4 bytes space.
188 Check whether we are using PIC code generation. */
189 cfun
->machine
->gp_size
= (flag_pic
) ? 4 : 0;
191 /* If $lp value is required to be saved on stack, it needs 4 bytes space.
192 Check whether $lp is ever live. */
193 cfun
->machine
->lp_size
= (df_regs_ever_live_p (LP_REGNUM
)) ? 4 : 0;
195 /* Initially there is no padding bytes. */
196 cfun
->machine
->callee_saved_area_gpr_padding_bytes
= 0;
198 /* Calculate the bytes of saving callee-saved registers on stack. */
199 cfun
->machine
->callee_saved_gpr_regs_size
= 0;
200 cfun
->machine
->callee_saved_first_gpr_regno
= SP_REGNUM
;
201 cfun
->machine
->callee_saved_last_gpr_regno
= SP_REGNUM
;
202 /* Currently, there is no need to check $r28~$r31
203 because we will save them in another way. */
204 for (r
= 0; r
< 28; r
++)
206 if (NDS32_REQUIRED_CALLEE_SAVED_P (r
))
208 /* Mark the first required callee-saved register
209 (only need to set it once).
210 If first regno == SP_REGNUM, we can tell that
211 it is the first time to be here. */
212 if (cfun
->machine
->callee_saved_first_gpr_regno
== SP_REGNUM
)
213 cfun
->machine
->callee_saved_first_gpr_regno
= r
;
214 /* Mark the last required callee-saved register. */
215 cfun
->machine
->callee_saved_last_gpr_regno
= r
;
219 /* Check if this function can omit prologue/epilogue code fragment.
220 If there is 'naked' attribute in this function,
221 we can set 'naked_p' flag to indicate that
222 we do not have to generate prologue/epilogue.
223 Or, if all the following conditions succeed,
224 we can set this function 'naked_p' as well:
225 condition 1: first_regno == last_regno == SP_REGNUM,
226 which means we do not have to save
227 any callee-saved registers.
228 condition 2: Both $lp and $fp are NOT live in this function,
229 which means we do not need to save them and there
231 condition 3: There is no local_size, which means
232 we do not need to adjust $sp. */
233 if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl
))
234 || (cfun
->machine
->callee_saved_first_gpr_regno
== SP_REGNUM
235 && cfun
->machine
->callee_saved_last_gpr_regno
== SP_REGNUM
236 && !df_regs_ever_live_p (FP_REGNUM
)
237 && !df_regs_ever_live_p (LP_REGNUM
)
238 && cfun
->machine
->local_size
== 0))
240 /* Set this function 'naked_p' and other functions can check this flag.
241 Note that in nds32 port, the 'naked_p = 1' JUST means there is no
242 callee-saved, local size, and outgoing size.
243 The varargs space and ret instruction may still present in
244 the prologue/epilogue expanding. */
245 cfun
->machine
->naked_p
= 1;
247 /* No need to save $fp, $gp, and $lp.
248 We should set these value to be zero
249 so that nds32_initial_elimination_offset() can work properly. */
250 cfun
->machine
->fp_size
= 0;
251 cfun
->machine
->gp_size
= 0;
252 cfun
->machine
->lp_size
= 0;
254 /* If stack usage computation is required,
255 we need to provide the static stack size. */
256 if (flag_stack_usage_info
)
257 current_function_static_stack_size
= 0;
259 /* No need to do following adjustment, return immediately. */
263 /* Adjustment for v3push instructions:
264 If we are using v3push (push25/pop25) instructions,
265 we need to make sure Rb is $r6 and Re is
266 located on $r6, $r8, $r10, or $r14.
267 Some results above will be discarded and recomputed.
268 Note that it is only available under V3/V3M ISA and we
269 DO NOT setup following stuff for isr or variadic function. */
271 && !nds32_isr_function_p (current_function_decl
)
272 && (cfun
->machine
->va_args_size
== 0))
275 cfun->machine->fp_size
276 cfun->machine->gp_size
277 cfun->machine->lp_size
278 cfun->machine->callee_saved_regs_first_regno
279 cfun->machine->callee_saved_regs_last_regno */
281 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
282 cfun
->machine
->fp_size
= 4;
283 cfun
->machine
->gp_size
= 4;
284 cfun
->machine
->lp_size
= 4;
286 /* Remember to set Rb = $r6. */
287 cfun
->machine
->callee_saved_first_gpr_regno
= 6;
289 if (cfun
->machine
->callee_saved_last_gpr_regno
<= 6)
292 cfun
->machine
->callee_saved_last_gpr_regno
= 6;
294 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 8)
297 cfun
->machine
->callee_saved_last_gpr_regno
= 8;
299 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 10)
302 cfun
->machine
->callee_saved_last_gpr_regno
= 10;
304 else if (cfun
->machine
->callee_saved_last_gpr_regno
<= 14)
307 cfun
->machine
->callee_saved_last_gpr_regno
= 14;
309 else if (cfun
->machine
->callee_saved_last_gpr_regno
== SP_REGNUM
)
311 /* If last_regno is SP_REGNUM, which means
312 it is never changed, so set it to Re = $r6. */
313 cfun
->machine
->callee_saved_last_gpr_regno
= 6;
317 /* The program flow should not go here. */
322 /* We have correctly set callee_saved_regs_first_regno
323 and callee_saved_regs_last_regno.
324 Initially, the callee_saved_regs_size is supposed to be 0.
325 As long as callee_saved_regs_last_regno is not SP_REGNUM,
326 we can update callee_saved_regs_size with new size. */
327 if (cfun
->machine
->callee_saved_last_gpr_regno
!= SP_REGNUM
)
329 /* Compute pushed size of callee-saved registers. */
330 cfun
->machine
->callee_saved_gpr_regs_size
331 = 4 * (cfun
->machine
->callee_saved_last_gpr_regno
332 - cfun
->machine
->callee_saved_first_gpr_regno
336 /* Important: We need to make sure that
337 (fp_size + gp_size + lp_size + callee_saved_regs_size)
339 If it is not, calculate the padding bytes. */
340 block_size
= cfun
->machine
->fp_size
341 + cfun
->machine
->gp_size
342 + cfun
->machine
->lp_size
343 + cfun
->machine
->callee_saved_gpr_regs_size
;
344 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size
))
346 cfun
->machine
->callee_saved_area_gpr_padding_bytes
347 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
) - block_size
;
350 /* If stack usage computation is required,
351 we need to provide the static stack size. */
352 if (flag_stack_usage_info
)
354 current_function_static_stack_size
355 = NDS32_ROUND_UP_DOUBLE_WORD (block_size
)
356 + cfun
->machine
->local_size
357 + cfun
->machine
->out_args_size
;
361 /* Function to create a parallel rtx pattern
362 which presents stack push multiple behavior.
363 The overall concept are:
364 "push registers to memory",
365 "adjust stack pointer". */
/* Emit a single PARALLEL insn that pushes registers Rb..Re (plus $fp/$gp/$lp
   as selected by the En4 bitmask: 0x8=$fp, 0x4=$gp, 0x2=$lp) and adjusts
   $sp in one step.  Each element is marked RTX_FRAME_RELATED_P so CFI can
   be generated; for a varargs push (vaarg_p) a manual REG_CFA_ADJUST_CFA
   note is attached instead of relying on GCC's default CFI logic.
   NOTE(review): this extraction is missing the return type, several local
   declarations (num_use_regs, extra_count, offset, regno, par_index, reg,
   mem, push_rtx, adjust_sp_rtx, dwarf), braces, and the second argument of
   some plus_constant calls — restore from the original file.  */
367 nds32_emit_stack_push_multiple (rtx Rb
, rtx Re
, rtx En4
, bool vaarg_p
)
374 int save_fp
, save_gp
, save_lp
;
383 /* We need to provide a customized rtx which contains
384 necessary information for data analysis,
385 so we create a parallel rtx like this:
386 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
388 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
391 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
393 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
395 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
397 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
399 (set (reg:SI SP_REGNUM)
400 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
402 /* Determine whether we need to save $fp, $gp, or $lp. */
403 save_fp
= INTVAL (En4
) & 0x8;
404 save_gp
= INTVAL (En4
) & 0x4;
405 save_lp
= INTVAL (En4
) & 0x2;
407 /* Calculate the number of registers that will be pushed. */
415 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
416 if (REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
)
417 num_use_regs
= extra_count
;
419 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + extra_count
;
421 /* In addition to used registers,
422 we need one more space for (set sp sp-x) rtx. */
423 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
424 rtvec_alloc (num_use_regs
+ 1));
427 /* Initialize offset and start to create push behavior. */
428 offset
= -(num_use_regs
* 4);
430 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
431 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
433 /* Rb and Re may be SP_REGNUM.
434 We need to break this loop immediately. */
435 if (regno
== SP_REGNUM
)
438 reg
= gen_rtx_REG (SImode
, regno
);
439 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
442 push_rtx
= gen_rtx_SET (mem
, reg
);
443 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
444 RTX_FRAME_RELATED_P (push_rtx
) = 1;
449 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
452 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
453 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
456 push_rtx
= gen_rtx_SET (mem
, reg
);
457 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
458 RTX_FRAME_RELATED_P (push_rtx
) = 1;
464 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
465 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
468 push_rtx
= gen_rtx_SET (mem
, reg
);
469 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
470 RTX_FRAME_RELATED_P (push_rtx
) = 1;
476 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
477 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
480 push_rtx
= gen_rtx_SET (mem
, reg
);
481 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
482 RTX_FRAME_RELATED_P (push_rtx
) = 1;
487 /* Create (set sp sp-x). */
489 /* We need to re-calculate the offset value again for adjustment. */
490 offset
= -(num_use_regs
* 4);
492 = gen_rtx_SET (stack_pointer_rtx
,
493 plus_constant (Pmode
, stack_pointer_rtx
, offset
));
494 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
495 RTX_FRAME_RELATED_P (adjust_sp_rtx
) = 1;
497 parallel_insn
= emit_insn (parallel_insn
);
499 /* The insn rtx 'parallel_insn' will change frame layout.
500 We need to use RTX_FRAME_RELATED_P so that GCC is able to
501 generate CFI (Call Frame Information) stuff. */
502 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
504 /* Don't use GCC's logic for CFI info if we are generate a push for VAARG
505 since we will not restore those register at epilogue. */
508 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
,
509 copy_rtx (adjust_sp_rtx
), NULL_RTX
);
510 REG_NOTES (parallel_insn
) = dwarf
;
514 /* Function to create a parallel rtx pattern
515 which presents stack pop multiple behavior.
516 The overall concept are:
517 "pop registers from memory",
518 "adjust stack pointer". */
/* Emit a single PARALLEL insn that pops registers Rb..Re (plus $fp/$gp/$lp
   as selected by the En4 bitmask: 0x8=$fp, 0x4=$gp, 0x2=$lp) and adjusts
   $sp upward in one step.  A REG_CFA_RESTORE note is accumulated for each
   restored register and a REG_CFA_ADJUST_CFA note for the $sp adjustment,
   then the whole note chain is attached manually via REG_NOTES.
   NOTE(review): this extraction is missing the return type, several local
   declarations (num_use_regs, extra_count, offset, regno, par_index, reg,
   mem, pop_rtx, adjust_sp_rtx), braces, and the offset initialization —
   restore from the original file.  */
520 nds32_emit_stack_pop_multiple (rtx Rb
, rtx Re
, rtx En4
)
527 int save_fp
, save_gp
, save_lp
;
534 rtx dwarf
= NULL_RTX
;
536 /* We need to provide a customized rtx which contains
537 necessary information for data analysis,
538 so we create a parallel rtx like this:
539 (parallel [(set (reg:SI Rb)
540 (mem (reg:SI SP_REGNUM)))
542 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
545 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
546 (set (reg:SI FP_REGNUM)
547 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
548 (set (reg:SI GP_REGNUM)
549 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
550 (set (reg:SI LP_REGNUM)
551 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
552 (set (reg:SI SP_REGNUM)
553 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
555 /* Determine whether we need to restore $fp, $gp, or $lp. */
556 save_fp
= INTVAL (En4
) & 0x8;
557 save_gp
= INTVAL (En4
) & 0x4;
558 save_lp
= INTVAL (En4
) & 0x2;
560 /* Calculate the number of registers that will be popped. */
568 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
569 if (REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
)
570 num_use_regs
= extra_count
;
572 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + extra_count
;
574 /* In addition to used registers,
575 we need one more space for (set sp sp+x) rtx. */
576 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
577 rtvec_alloc (num_use_regs
+ 1));
580 /* Initialize offset and start to create pop behavior. */
583 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
584 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
586 /* Rb and Re may be SP_REGNUM.
587 We need to break this loop immediately. */
588 if (regno
== SP_REGNUM
)
591 reg
= gen_rtx_REG (SImode
, regno
);
592 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
595 pop_rtx
= gen_rtx_SET (reg
, mem
);
596 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
597 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
601 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
604 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
607 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
608 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
611 pop_rtx
= gen_rtx_SET (reg
, mem
);
612 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
613 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
617 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
621 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
622 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
625 pop_rtx
= gen_rtx_SET (reg
, mem
);
626 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
627 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
631 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
635 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
636 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
639 pop_rtx
= gen_rtx_SET (reg
, mem
);
640 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
641 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
645 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
648 /* Create (set sp sp+x). */
650 /* The offset value is already in place. No need to re-calculate it. */
652 = gen_rtx_SET (stack_pointer_rtx
,
653 plus_constant (Pmode
, stack_pointer_rtx
, offset
));
654 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
656 /* Tell gcc we adjust SP in this insn. */
657 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, copy_rtx (adjust_sp_rtx
), dwarf
);
659 parallel_insn
= emit_insn (parallel_insn
);
661 /* The insn rtx 'parallel_insn' will change frame layout.
662 We need to use RTX_FRAME_RELATED_P so that GCC is able to
663 generate CFI (Call Frame Information) stuff. */
664 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
666 /* Add CFI info by manual. */
667 REG_NOTES (parallel_insn
) = dwarf
;
670 /* Function to create a parallel rtx pattern
671 which presents stack v3push behavior.
672 The overall concept are:
673 "push registers to memory",
674 "adjust stack pointer". */
/* Emit the PARALLEL insn modeling a v3push (push25): push Rb..Re plus the
   always-saved $fp, $gp, and $lp, then adjust $sp by the push size plus an
   extra imm8u displacement.  Under v3push Rb is $r6 and Re is $r6/$r8/$r10/
   $r14, so the Rb=Re=SP_REGNUM special case cannot occur here.
   NOTE(review): this extraction is missing the return type, the 'rtx Re'
   and 'rtx imm8u' parameter lines, local declarations (num_use_regs,
   offset, regno, par_index, reg, mem, push_rtx, adjust_sp_rtx), braces,
   and parts of some plus_constant calls — restore from the original.  */
676 nds32_emit_stack_v3push (rtx Rb
,
678 rtx En4 ATTRIBUTE_UNUSED
,
692 /* We need to provide a customized rtx which contains
693 necessary information for data analysis,
694 so we create a parallel rtx like this:
695 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
697 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
700 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
702 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
704 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
706 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
708 (set (reg:SI SP_REGNUM)
709 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
711 /* Calculate the number of registers that will be pushed.
712 Since $fp, $gp, and $lp is always pushed with v3push instruction,
713 we need to count these three registers.
714 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
715 So there is no need to worry about Rb=Re=SP_REGNUM case. */
716 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + 3;
718 /* In addition to used registers,
719 we need one more space for (set sp sp-x-imm8u) rtx. */
720 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
721 rtvec_alloc (num_use_regs
+ 1));
724 /* Initialize offset and start to create push behavior. */
725 offset
= -(num_use_regs
* 4);
727 /* Create (set mem regX) from Rb, Rb+1 up to Re.
728 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
729 So there is no need to worry about Rb=Re=SP_REGNUM case. */
730 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
732 reg
= gen_rtx_REG (SImode
, regno
);
733 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
736 push_rtx
= gen_rtx_SET (mem
, reg
);
737 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
738 RTX_FRAME_RELATED_P (push_rtx
) = 1;
743 /* Create (set mem fp). */
744 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
745 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
748 push_rtx
= gen_rtx_SET (mem
, reg
);
749 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
750 RTX_FRAME_RELATED_P (push_rtx
) = 1;
753 /* Create (set mem gp). */
754 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
755 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
758 push_rtx
= gen_rtx_SET (mem
, reg
);
759 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
760 RTX_FRAME_RELATED_P (push_rtx
) = 1;
763 /* Create (set mem lp). */
764 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
765 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
768 push_rtx
= gen_rtx_SET (mem
, reg
);
769 XVECEXP (parallel_insn
, 0, par_index
) = push_rtx
;
770 RTX_FRAME_RELATED_P (push_rtx
) = 1;
774 /* Create (set sp sp-x-imm8u). */
776 /* We need to re-calculate the offset value again for adjustment. */
777 offset
= -(num_use_regs
* 4);
779 = gen_rtx_SET (stack_pointer_rtx
,
780 plus_constant (Pmode
,
782 offset
- INTVAL (imm8u
)));
783 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
784 RTX_FRAME_RELATED_P (adjust_sp_rtx
) = 1;
786 parallel_insn
= emit_insn (parallel_insn
);
788 /* The insn rtx 'parallel_insn' will change frame layout.
789 We need to use RTX_FRAME_RELATED_P so that GCC is able to
790 generate CFI (Call Frame Information) stuff. */
791 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
794 /* Function to create a parallel rtx pattern
795 which presents stack v3pop behavior.
796 The overall concept are:
797 "pop registers from memory",
798 "adjust stack pointer". */
/* Emit the PARALLEL insn modeling a v3pop (pop25): pop Rb..Re plus the
   always-restored $fp, $gp, and $lp, then adjust $sp by the pop size plus
   an extra imm8u displacement.  REG_CFA_RESTORE notes are accumulated per
   restored register and a REG_CFA_ADJUST_CFA note for $sp, attached
   manually via REG_NOTES.
   NOTE(review): this extraction is missing the return type, the 'rtx Re'
   and 'rtx imm8u' parameter lines, local declarations (num_use_regs,
   offset, regno, par_index, reg, mem, pop_rtx, adjust_sp_rtx), braces,
   and the offset initialization — restore from the original file.  */
800 nds32_emit_stack_v3pop (rtx Rb
,
802 rtx En4 ATTRIBUTE_UNUSED
,
815 rtx dwarf
= NULL_RTX
;
817 /* We need to provide a customized rtx which contains
818 necessary information for data analysis,
819 so we create a parallel rtx like this:
820 (parallel [(set (reg:SI Rb)
821 (mem (reg:SI SP_REGNUM)))
823 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
826 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
827 (set (reg:SI FP_REGNUM)
828 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
829 (set (reg:SI GP_REGNUM)
830 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
831 (set (reg:SI LP_REGNUM)
832 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
833 (set (reg:SI SP_REGNUM)
834 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
836 /* Calculate the number of registers that will be popped.
837 Since $fp, $gp, and $lp is always popped with v3pop instruction,
838 we need to count these three registers.
839 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
840 So there is no need to worry about Rb=Re=SP_REGNUM case. */
841 num_use_regs
= REGNO (Re
) - REGNO (Rb
) + 1 + 3;
843 /* In addition to used registers,
844 we need one more space for (set sp sp+x+imm8u) rtx. */
845 parallel_insn
= gen_rtx_PARALLEL (VOIDmode
,
846 rtvec_alloc (num_use_regs
+ 1));
849 /* Initialize offset and start to create pop behavior. */
852 /* Create (set regX mem) from Rb, Rb+1 up to Re.
853 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
854 So there is no need to worry about Rb=Re=SP_REGNUM case. */
855 for (regno
= REGNO (Rb
); regno
<= (int) REGNO (Re
); regno
++)
857 reg
= gen_rtx_REG (SImode
, regno
);
858 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
861 pop_rtx
= gen_rtx_SET (reg
, mem
);
862 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
863 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
867 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
870 /* Create (set fp mem). */
871 reg
= gen_rtx_REG (SImode
, FP_REGNUM
);
872 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
875 pop_rtx
= gen_rtx_SET (reg
, mem
);
876 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
877 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
880 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
882 /* Create (set gp mem). */
883 reg
= gen_rtx_REG (SImode
, GP_REGNUM
);
884 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
887 pop_rtx
= gen_rtx_SET (reg
, mem
);
888 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
889 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
892 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
894 /* Create (set lp mem). */
895 reg
= gen_rtx_REG (SImode
, LP_REGNUM
);
896 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
,
899 pop_rtx
= gen_rtx_SET (reg
, mem
);
900 XVECEXP (parallel_insn
, 0, par_index
) = pop_rtx
;
901 RTX_FRAME_RELATED_P (pop_rtx
) = 1;
904 dwarf
= alloc_reg_note (REG_CFA_RESTORE
, reg
, dwarf
);
906 /* Create (set sp sp+x+imm8u). */
908 /* The offset value is already in place. No need to re-calculate it. */
910 = gen_rtx_SET (stack_pointer_rtx
,
911 plus_constant (Pmode
,
913 offset
+ INTVAL (imm8u
)));
914 XVECEXP (parallel_insn
, 0, par_index
) = adjust_sp_rtx
;
916 /* Tell gcc we adjust SP in this insn. */
917 dwarf
= alloc_reg_note (REG_CFA_ADJUST_CFA
, copy_rtx (adjust_sp_rtx
), dwarf
);
919 parallel_insn
= emit_insn (parallel_insn
);
921 /* The insn rtx 'parallel_insn' will change frame layout.
922 We need to use RTX_FRAME_RELATED_P so that GCC is able to
923 generate CFI (Call Frame Information) stuff. */
924 RTX_FRAME_RELATED_P (parallel_insn
) = 1;
926 /* Add CFI info by manual. */
927 REG_NOTES (parallel_insn
) = dwarf
;
930 /* Function that may creates more instructions
931 for large value on adjusting stack pointer.
933 In nds32 target, 'addi' can be used for stack pointer
934 adjustment in prologue/epilogue stage.
935 However, sometimes there are too many local variables so that
936 the adjustment value is not able to be fit in the 'addi' instruction.
937 One solution is to move value into a register
938 and then use 'add' instruction.
939 In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
940 Also, we need to return zero for sp adjustment so that
941 prologue/epilogue knows there is no need to create 'addi' instruction. */
/* Adjust $sp by full_value.  If the value fits the 'addi' Is15 constraint,
   return its absolute value so the caller emits the addi itself; otherwise
   move the value into $r15 (TA_REGNUM), emit an 'add' against $sp with a
   REG_FRAME_RELATED_EXPR note for CFI, and return 0 so the caller emits
   nothing further.  A negative full_value indicates a prologue caller.
   NOTE(review): this extraction is missing the return type, local
   declarations (tmp_reg, sp_adjust_insn, plus_rtx, set_rtx, adjust_value),
   braces, the gen_addsi3 operand lines, and the 'return 0;' in the
   large-value branch — restore from the original file.  */
943 nds32_force_addi_stack_int (int full_value
)
950 if (!satisfies_constraint_Is15 (GEN_INT (full_value
)))
952 /* The value is not able to fit in single addi instruction.
953 Create more instructions of moving value into a register
954 and then add stack pointer with it. */
956 /* $r15 is going to be temporary register to hold the value. */
957 tmp_reg
= gen_rtx_REG (SImode
, TA_REGNUM
);
959 /* Create one more instruction to move value
960 into the temporary register. */
961 emit_move_insn (tmp_reg
, GEN_INT (full_value
));
963 /* Create new 'add' rtx. */
964 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
967 /* Emit rtx into insn list and receive its transformed insn rtx. */
968 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
970 /* At prologue, we need to tell GCC that this is frame related insn,
971 so that we can consider this instruction to output debug information.
972 If full_value is NEGATIVE, it means this function
973 is invoked by expand_prologue. */
976 /* Because (tmp_reg <- full_value) may be split into two
977 rtl patterns, we can not set its RTX_FRAME_RELATED_P.
978 We need to construct another (sp <- sp + full_value)
979 and then insert it into sp_adjust_insn's reg note to
980 represent a frame related expression.
981 GCC knows how to refer it and output debug information. */
986 plus_rtx
= plus_constant (Pmode
, stack_pointer_rtx
, full_value
);
987 set_rtx
= gen_rtx_SET (stack_pointer_rtx
, plus_rtx
);
988 add_reg_note (sp_adjust_insn
, REG_FRAME_RELATED_EXPR
, set_rtx
);
990 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
993 /* We have used alternative way to adjust stack pointer value.
994 Return zero so that prologue/epilogue
995 will not generate other instructions. */
1000 /* The value is able to fit in addi instruction.
1001 However, remember to make it to be positive value
1002 because we want to return 'adjustment' result. */
1003 adjust_value
= (full_value
< 0) ? (-full_value
) : (full_value
);
1005 return adjust_value
;
1009 /* Return true if MODE/TYPE need double word alignment. */
/* Return true if MODE/TYPE requires alignment greater than PARM_BOUNDARY,
   i.e. the argument needs double-word alignment.
   NOTE(review): the return type line, 'int align;' declaration, and braces
   appear to be missing from this extraction — restore from the original.  */
1011 nds32_needs_double_word_align (machine_mode mode
, const_tree type
)
1015 /* Pick up the alignment according to the mode or type. */
1016 align
= NDS32_MODE_TYPE_ALIGN (mode
, type
);
1018 return (align
> PARM_BOUNDARY
);
1021 /* Return true if FUNC is a naked function. */
/* Return true if FUNC is a FUNCTION_DECL carrying the 'naked' attribute.
   NOTE(review): the return type line, the early 'return false;' for the
   non-FUNCTION_DECL case, the 'tree t;' declaration, and braces appear to
   be missing from this extraction — restore from the original file.  */
1023 nds32_naked_function_p (tree func
)
1027 if (TREE_CODE (func
) != FUNCTION_DECL
)
1030 t
= lookup_attribute ("naked", DECL_ATTRIBUTES (func
));
1032 return (t
!= NULL_TREE
);
1035 /* Function that check if 'X' is a valid address register.
1036 The variable 'STRICT' is very important to
1037 make decision for register number.
1040 => We are in reload pass or after reload pass.
1041 The register number should be strictly limited in general registers.
1044 => Before reload pass, we are free to use any register number. */
/* Return true if X is a REG acceptable as an address base register.
   When STRICT (in or after reload), the register number is limited to
   general registers via REGNO_OK_FOR_BASE_P; before reload any register
   number is acceptable.
   NOTE(review): the return type line, the 'regno = REGNO (x);' assignment,
   the non-REG 'return false;', the strict/non-strict branching, and braces
   appear to be missing from this extraction — restore from the original.  */
1046 nds32_address_register_rtx_p (rtx x
, bool strict
)
1050 if (GET_CODE (x
) != REG
)
1056 return REGNO_OK_FOR_BASE_P (regno
);
1061 /* Function that check if 'INDEX' is valid to be a index rtx for address.
1063 OUTER_MODE : Machine mode of outer address rtx.
1064 INDEX : Check if this rtx is valid to be a index for address.
1065 STRICT : If it is true, we are in reload pass or after reload pass. */
/* Return true if INDEX is a valid index rtx for an address of mode
   OUTER_MODE.  Accepts a register (restricted by STRICT), a suitably
   aligned and range-limited CONST_INT (range depends on the access size
   of OUTER_MODE: Is15/Is16/Is17 constraints), a (mult reg {1,2,4}), or an
   (ashift reg {0,1,2}).
   NOTE(review): this extraction is missing the return type, the 'rtx index'
   and 'bool strict' parameter lines, the 'case' labels of the switch, the
   various 'return false;' fall-through lines, local declarations (regno,
   op0, op1, multiplier, sv), and braces — restore from the original.  */
1067 nds32_legitimate_index_p (machine_mode outer_mode
,
1075 switch (GET_CODE (index
))
1078 regno
= REGNO (index
);
1079 /* If we are in reload pass or after reload pass,
1080 we need to limit it to general register. */
1082 return REGNO_OK_FOR_INDEX_P (regno
);
1087 /* The alignment of the integer value is determined by 'outer_mode'. */
1088 if (GET_MODE_SIZE (outer_mode
) == 1)
1090 /* Further check if the value is legal for the 'outer_mode'. */
1091 if (!satisfies_constraint_Is15 (index
))
1094 /* Pass all test, the value is valid, return true. */
1097 if (GET_MODE_SIZE (outer_mode
) == 2
1098 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index
)))
1100 /* Further check if the value is legal for the 'outer_mode'. */
1101 if (!satisfies_constraint_Is16 (index
))
1104 /* Pass all test, the value is valid, return true. */
1107 if (GET_MODE_SIZE (outer_mode
) == 4
1108 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index
)))
1110 /* Further check if the value is legal for the 'outer_mode'. */
1111 if (!satisfies_constraint_Is17 (index
))
1114 /* Pass all test, the value is valid, return true. */
1117 if (GET_MODE_SIZE (outer_mode
) == 8
1118 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index
)))
1120 /* Further check if the value is legal for the 'outer_mode'. */
1121 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index
) + 4,
1125 /* Pass all test, the value is valid, return true. */
1132 op0
= XEXP (index
, 0);
1133 op1
= XEXP (index
, 1);
1135 if (REG_P (op0
) && CONST_INT_P (op1
))
1138 multiplier
= INTVAL (op1
);
1140 /* We only allow (mult reg const_int_1)
1141 or (mult reg const_int_2) or (mult reg const_int_4). */
1142 if (multiplier
!= 1 && multiplier
!= 2 && multiplier
!= 4)
1145 regno
= REGNO (op0
);
1146 /* Limit it in general registers if we are
1147 in reload pass or after reload pass. */
1149 return REGNO_OK_FOR_INDEX_P (regno
);
1157 op0
= XEXP (index
, 0);
1158 op1
= XEXP (index
, 1);
1160 if (REG_P (op0
) && CONST_INT_P (op1
))
1163 /* op1 is already the sv value for use to do left shift. */
1166 /* We only allow (ashift reg const_int_0)
1167 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1168 if (sv
!= 0 && sv
!= 1 && sv
!=2)
1171 regno
= REGNO (op0
);
1172 /* Limit it in general registers if we are
1173 in reload pass or after reload pass. */
1175 return REGNO_OK_FOR_INDEX_P (regno
);
1187 /* ------------------------------------------------------------------------ */
1189 /* PART 3: Implement target hook stuff definitions. */
1191 /* Register Classes. */
1193 static unsigned char
1194 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED
,
1197 /* Return the maximum number of consecutive registers
1198 needed to represent "mode" in a register of "rclass". */
1199 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
1203 nds32_register_priority (int hard_regno
)
1205 /* Encourage to use r0-r7 for LRA when optimize for size. */
1206 if (optimize_size
&& hard_regno
< 8)
1212 /* Stack Layout and Calling Conventions. */
1214 /* There are three kinds of pointer concepts using in GCC compiler:
1216 frame pointer: A pointer to the first location of local variables.
1217 stack pointer: A pointer to the top of a stack frame.
1218 argument pointer: A pointer to the incoming arguments.
1220 In nds32 target calling convention, we are using 8-byte alignment.
1221 Besides, we would like to have each stack frame of a function includes:
1224 1. previous hard frame pointer
1226 3. callee-saved registers
1227 4. <padding bytes> (we will calculte in nds32_compute_stack_frame()
1229 cfun->machine->callee_saved_area_padding_bytes)
1233 2. spilling location
1234 3. <padding bytes> (it will be calculated by GCC itself)
1235 4. incoming arguments
1236 5. <padding bytes> (it will be calculated by GCC itself)
1239 1. <padding bytes> (it will be calculated by GCC itself)
1240 2. outgoing arguments
1242 We 'wrap' these blocks together with
1243 hard frame pointer ($r28) and stack pointer ($r31).
1244 By applying the basic frame/stack/argument pointers concept,
1245 the layout of a stack frame shoule be like this:
1248 old stack pointer -> ----
1250 | | saved arguments for
1251 | | vararg functions
1253 hard frame pointer -> --
1254 & argument pointer | | \
1255 | | previous hardware frame pointer
1257 | | callee-saved registers
1262 | | and incoming arguments
1269 stack pointer -> ----
1271 $SFP and $AP are used to represent frame pointer and arguments pointer,
1272 which will be both eliminated as hard frame pointer. */
1274 /* -- Eliminating Frame Pointer and Arg Pointer. */
1277 nds32_can_eliminate (const int from_reg
, const int to_reg
)
1279 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
1282 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== HARD_FRAME_POINTER_REGNUM
)
1285 if (from_reg
== FRAME_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
1288 if (from_reg
== FRAME_POINTER_REGNUM
&& to_reg
== HARD_FRAME_POINTER_REGNUM
)
1294 /* -- Passing Arguments in Registers. */
1297 nds32_function_arg (cumulative_args_t ca
, machine_mode mode
,
1298 const_tree type
, bool named
)
1301 CUMULATIVE_ARGS
*cum
= get_cumulative_args (ca
);
1303 /* The last time this hook is called,
1304 it is called with MODE == VOIDmode. */
1305 if (mode
== VOIDmode
)
1308 /* For nameless arguments, we need to take care it individually. */
1311 /* If we are under hard float abi, we have arguments passed on the
1312 stack and all situation can be handled by GCC itself. */
1313 if (TARGET_HARD_FLOAT
)
1316 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum
->gpr_offset
, mode
, type
))
1318 /* If we still have enough registers to pass argument, pick up
1319 next available register number. */
1321 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
);
1322 return gen_rtx_REG (mode
, regno
);
1325 /* No register available, return NULL_RTX.
1326 The compiler will use stack to pass argument instead. */
1330 /* The following is to handle named argument.
1331 Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
1333 if (TARGET_HARD_FLOAT
)
1335 /* Currently we have not implemented hard float yet. */
1340 /* For !TARGET_HARD_FLOAT calling convention, we always use GPR to pass
1341 argument. Since we allow to pass argument partially in registers,
1342 we can just return it if there are still registers available. */
1343 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum
->gpr_offset
, mode
, type
))
1345 /* Pick up the next available register number. */
1347 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
);
1348 return gen_rtx_REG (mode
, regno
);
1353 /* No register available, return NULL_RTX.
1354 The compiler will use stack to pass argument instead. */
1359 nds32_must_pass_in_stack (machine_mode mode
, const_tree type
)
1361 /* Return true if a type must be passed in memory.
1362 If it is NOT using hard float abi, small aggregates can be
1363 passed in a register even we are calling a variadic function.
1364 So there is no need to take padding into consideration. */
1365 if (TARGET_HARD_FLOAT
)
1366 return must_pass_in_stack_var_size_or_pad (mode
, type
);
1368 return must_pass_in_stack_var_size (mode
, type
);
1372 nds32_arg_partial_bytes (cumulative_args_t ca
, machine_mode mode
,
1373 tree type
, bool named ATTRIBUTE_UNUSED
)
1375 /* Returns the number of bytes at the beginning of an argument that
1376 must be put in registers. The value must be zero for arguments that are
1377 passed entirely in registers or that are entirely pushed on the stack.
1378 Besides, TARGET_FUNCTION_ARG for these arguments should return the
1379 first register to be used by the caller for this argument. */
1380 unsigned int needed_reg_count
;
1381 unsigned int remaining_reg_count
;
1382 CUMULATIVE_ARGS
*cum
;
1384 cum
= get_cumulative_args (ca
);
1386 /* Under hard float abi, we better have argument entirely passed in
1387 registers or pushed on the stack so that we can reduce the complexity
1388 of dealing with cum->gpr_offset and cum->fpr_offset. */
1389 if (TARGET_HARD_FLOAT
)
1392 /* If we have already runned out of argument registers, return zero
1393 so that the argument will be entirely pushed on the stack. */
1394 if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1395 >= NDS32_GPR_ARG_FIRST_REGNUM
+ NDS32_MAX_GPR_REGS_FOR_ARGS
)
1398 /* Calculate how many registers do we need for this argument. */
1399 needed_reg_count
= NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1401 /* Calculate how many argument registers have left for passing argument.
1402 Note that we should count it from next available register number. */
1404 = NDS32_MAX_GPR_REGS_FOR_ARGS
1405 - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1406 - NDS32_GPR_ARG_FIRST_REGNUM
);
1408 /* Note that we have to return the nubmer of bytes, not registers count. */
1409 if (needed_reg_count
> remaining_reg_count
)
1410 return remaining_reg_count
* UNITS_PER_WORD
;
1416 nds32_function_arg_advance (cumulative_args_t ca
, machine_mode mode
,
1417 const_tree type
, bool named
)
1419 machine_mode sub_mode
;
1420 CUMULATIVE_ARGS
*cum
= get_cumulative_args (ca
);
1424 /* We need to further check TYPE and MODE so that we can determine
1425 which kind of register we shall advance. */
1426 if (type
&& TREE_CODE (type
) == COMPLEX_TYPE
)
1427 sub_mode
= TYPE_MODE (TREE_TYPE (type
));
1431 /* Under hard float abi, we may advance FPR registers. */
1432 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (sub_mode
) == MODE_FLOAT
)
1434 /* Currently we have not implemented hard float yet. */
1440 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1441 - NDS32_GPR_ARG_FIRST_REGNUM
1442 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1447 /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
1448 we can advance next register as well so that caller is
1449 able to pass arguments in registers and callee must be
1450 in charge of pushing all of them into stack. */
1451 if (!TARGET_HARD_FLOAT
)
1454 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1455 - NDS32_GPR_ARG_FIRST_REGNUM
1456 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1462 nds32_function_arg_boundary (machine_mode mode
, const_tree type
)
1464 return (nds32_needs_double_word_align (mode
, type
)
1465 ? NDS32_DOUBLE_WORD_ALIGNMENT
1469 /* -- How Scalar Function Values Are Returned. */
1472 nds32_function_value (const_tree ret_type
,
1473 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1474 bool outgoing ATTRIBUTE_UNUSED
)
1479 mode
= TYPE_MODE (ret_type
);
1480 unsignedp
= TYPE_UNSIGNED (ret_type
);
1482 mode
= promote_mode (ret_type
, mode
, &unsignedp
);
1484 return gen_rtx_REG (mode
, NDS32_GPR_RET_FIRST_REGNUM
);
1488 nds32_libcall_value (machine_mode mode
,
1489 const_rtx fun ATTRIBUTE_UNUSED
)
1491 return gen_rtx_REG (mode
, NDS32_GPR_RET_FIRST_REGNUM
);
1495 nds32_function_value_regno_p (const unsigned int regno
)
1497 return (regno
== NDS32_GPR_RET_FIRST_REGNUM
);
1500 /* -- Function Entry and Exit. */
1502 /* The content produced from this function
1503 will be placed before prologue body. */
1505 nds32_asm_function_prologue (FILE *file
)
1508 const char *func_name
;
1512 /* All stack frame information is supposed to be
1513 already computed when expanding prologue.
1514 The result is in cfun->machine.
1515 DO NOT call nds32_compute_stack_frame() here
1516 because it may corrupt the essential information. */
1518 fprintf (file
, "\t! BEGIN PROLOGUE\n");
1519 fprintf (file
, "\t! fp needed: %d\n", frame_pointer_needed
);
1520 fprintf (file
, "\t! pretend_args: %d\n", cfun
->machine
->va_args_size
);
1521 fprintf (file
, "\t! local_size: %d\n", cfun
->machine
->local_size
);
1522 fprintf (file
, "\t! out_args_size: %d\n", cfun
->machine
->out_args_size
);
1524 /* Use df_regs_ever_live_p() to detect if the register
1525 is ever used in the current function. */
1526 fprintf (file
, "\t! registers ever_live: ");
1527 for (r
= 0; r
< 32; r
++)
1529 if (df_regs_ever_live_p (r
))
1530 fprintf (file
, "%s, ", reg_names
[r
]);
1534 /* Display the attributes of this function. */
1535 fprintf (file
, "\t! function attributes: ");
1536 /* Get the attributes tree list.
1537 Note that GCC builds attributes list with reverse order. */
1538 attrs
= DECL_ATTRIBUTES (current_function_decl
);
1540 /* If there is no any attribute, print out "None". */
1542 fprintf (file
, "None");
1544 /* If there are some attributes, try if we need to
1545 construct isr vector information. */
1546 func_name
= IDENTIFIER_POINTER (DECL_NAME (current_function_decl
));
1547 nds32_construct_isr_vectors_information (attrs
, func_name
);
1549 /* Display all attributes of this function. */
1552 name
= TREE_PURPOSE (attrs
);
1553 fprintf (file
, "%s ", IDENTIFIER_POINTER (name
));
1555 /* Pick up the next attribute. */
1556 attrs
= TREE_CHAIN (attrs
);
1561 /* After rtl prologue has been expanded, this function is used. */
1563 nds32_asm_function_end_prologue (FILE *file
)
1565 fprintf (file
, "\t! END PROLOGUE\n");
1567 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1568 we can generate special directive: ".omit_fp_begin"
1569 to guide linker doing fp-as-gp optimization.
1570 However, for a naked function, which means
1571 it should not have prologue/epilogue,
1572 using fp-as-gp still requires saving $fp by push/pop behavior and
1573 there is no benefit to use fp-as-gp on such small function.
1574 So we need to make sure this function is NOT naked as well. */
1575 if (!frame_pointer_needed
1576 && !cfun
->machine
->naked_p
1577 && cfun
->machine
->fp_as_gp_p
)
1579 fprintf (file
, "\t! ----------------------------------------\n");
1580 fprintf (file
, "\t! Guide linker to do "
1581 "link time optimization: fp-as-gp\n");
1582 fprintf (file
, "\t! We add one more instruction to "
1583 "initialize $fp near to $gp location.\n");
1584 fprintf (file
, "\t! If linker fails to use fp-as-gp transformation,\n");
1585 fprintf (file
, "\t! this extra instruction should be "
1586 "eliminated at link stage.\n");
1587 fprintf (file
, "\t.omit_fp_begin\n");
1588 fprintf (file
, "\tla\t$fp,_FP_BASE_\n");
1589 fprintf (file
, "\t! ----------------------------------------\n");
1593 /* Before rtl epilogue has been expanded, this function is used. */
1595 nds32_asm_function_begin_epilogue (FILE *file
)
1597 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1598 we can generate special directive: ".omit_fp_end"
1599 to claim fp-as-gp optimization range.
1600 However, for a naked function,
1601 which means it should not have prologue/epilogue,
1602 using fp-as-gp still requires saving $fp by push/pop behavior and
1603 there is no benefit to use fp-as-gp on such small function.
1604 So we need to make sure this function is NOT naked as well. */
1605 if (!frame_pointer_needed
1606 && !cfun
->machine
->naked_p
1607 && cfun
->machine
->fp_as_gp_p
)
1609 fprintf (file
, "\t! ----------------------------------------\n");
1610 fprintf (file
, "\t! Claim the range of fp-as-gp "
1611 "link time optimization\n");
1612 fprintf (file
, "\t.omit_fp_end\n");
1613 fprintf (file
, "\t! ----------------------------------------\n");
1616 fprintf (file
, "\t! BEGIN EPILOGUE\n");
1619 /* The content produced from this function
1620 will be placed after epilogue body. */
/* The content produced from this function
   will be placed after epilogue body.  */
static void
nds32_asm_function_epilogue (FILE *file)
{
  fprintf (file, "\t! END EPILOGUE\n");
}
1628 nds32_asm_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
1629 HOST_WIDE_INT delta
,
1630 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
1635 /* Make sure unwind info is emitted for the thunk if needed. */
1636 final_start_function (emit_barrier (), file
, 1);
1638 this_regno
= (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
)
1644 if (satisfies_constraint_Is15 (GEN_INT (delta
)))
1646 fprintf (file
, "\taddi\t$r%d, $r%d, %ld\n",
1647 this_regno
, this_regno
, delta
);
1649 else if (satisfies_constraint_Is20 (GEN_INT (delta
)))
1651 fprintf (file
, "\tmovi\t$ta, %ld\n", delta
);
1652 fprintf (file
, "\tadd\t$r%d, $r%d, $ta\n", this_regno
, this_regno
);
1656 fprintf (file
, "\tsethi\t$ta, hi20(%ld)\n", delta
);
1657 fprintf (file
, "\tori\t$ta, $ta, lo12(%ld)\n", delta
);
1658 fprintf (file
, "\tadd\t$r%d, $r%d, $ta\n", this_regno
, this_regno
);
1662 fprintf (file
, "\tb\t");
1663 assemble_name (file
, XSTR (XEXP (DECL_RTL (function
), 0), 0));
1664 fprintf (file
, "\n");
1666 final_end_function ();
1669 /* -- Permitting tail calls. */
1671 /* Determine whether we need to enable warning for function return check. */
1673 nds32_warn_func_return (tree decl
)
1675 /* Naked functions are implemented entirely in assembly, including the
1676 return sequence, so suppress warnings about this. */
1677 return !nds32_naked_function_p (decl
);
1681 /* Implementing the Varargs Macros. */
1684 nds32_setup_incoming_varargs (cumulative_args_t ca
,
1687 int *pretend_args_size
,
1688 int second_time ATTRIBUTE_UNUSED
)
1690 unsigned int total_args_regs
;
1691 unsigned int num_of_used_regs
;
1692 unsigned int remaining_reg_count
;
1693 CUMULATIVE_ARGS
*cum
;
1695 /* If we are under hard float abi, we do not need to set *pretend_args_size.
1696 So that all nameless arguments are pushed by caller and all situation
1697 can be handled by GCC itself. */
1698 if (TARGET_HARD_FLOAT
)
1701 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
1702 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
1703 However, for nameless(anonymous) arguments, we should push them on the
1704 stack so that all the nameless arguments appear to have been passed
1705 consecutively in the memory for accessing. Hence, we need to check and
1706 exclude the registers that are used for named arguments. */
1708 cum
= get_cumulative_args (ca
);
1710 /* The MODE and TYPE describe the last argument.
1711 We need those information to determine the remaining registers
1714 = NDS32_MAX_GPR_REGS_FOR_ARGS
+ NDS32_GPR_ARG_FIRST_REGNUM
;
1716 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum
->gpr_offset
, mode
, type
)
1717 + NDS32_NEED_N_REGS_FOR_ARG (mode
, type
);
1719 remaining_reg_count
= total_args_regs
- num_of_used_regs
;
1720 *pretend_args_size
= remaining_reg_count
* UNITS_PER_WORD
;
1726 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1728 /* If this hook returns true, the named argument of FUNCTION_ARG is always
1729 true for named arguments, and false for unnamed arguments. */
1734 /* Trampolines for Nested Functions. */
1737 nds32_asm_trampoline_template (FILE *f
)
1739 if (TARGET_REDUCED_REGS
)
1741 /* Trampoline is not supported on reduced-set registers yet. */
1742 sorry ("a nested function is not supported for reduced registers");
1746 asm_fprintf (f
, "\t! Trampoline code template\n");
1747 asm_fprintf (f
, "\t! This code fragment will be copied "
1748 "into stack on demand\n");
1750 asm_fprintf (f
, "\tmfusr\t$r16,$pc\n");
1751 asm_fprintf (f
, "\tlwi\t$r15,[$r16 + 20] "
1752 "! load nested function address\n");
1753 asm_fprintf (f
, "\tlwi\t$r16,[$r16 + 16] "
1754 "! load chain_value\n");
1755 asm_fprintf (f
, "\tjr\t$r15\n");
1758 /* Preserve space ($pc + 16) for saving chain_value,
1759 nds32_trampoline_init will fill the value in this slot. */
1760 asm_fprintf (f
, "\t! space for saving chain_value\n");
1761 assemble_aligned_integer (UNITS_PER_WORD
, const0_rtx
);
1763 /* Preserve space ($pc + 20) for saving nested function address,
1764 nds32_trampoline_init will fill the value in this slot. */
1765 asm_fprintf (f
, "\t! space for saving nested function address\n");
1766 assemble_aligned_integer (UNITS_PER_WORD
, const0_rtx
);
1769 /* Emit RTL insns to initialize the variable parts of a trampoline. */
1771 nds32_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
1775 /* Nested function address. */
1777 /* The memory rtx that is going to
1778 be filled with chain_value. */
1779 rtx chain_value_mem
;
1780 /* The memory rtx that is going to
1781 be filled with nested function address. */
1782 rtx nested_func_mem
;
1784 /* Start address of trampoline code in stack, for doing cache sync. */
1785 rtx sync_cache_addr
;
1786 /* Temporary register for sync instruction. */
1788 /* Instruction-cache sync instruction,
1789 requesting an argument as starting address. */
1791 /* For convenience reason of doing comparison. */
1792 int tramp_align_in_bytes
;
1794 /* Trampoline is not supported on reduced-set registers yet. */
1795 if (TARGET_REDUCED_REGS
)
1796 sorry ("a nested function is not supported for reduced registers");
1798 /* STEP 1: Copy trampoline code template into stack,
1799 fill up essential data into stack. */
1801 /* Extract nested function address rtx. */
1802 fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
1804 /* m_tramp is memory rtx that is going to be filled with trampoline code.
1805 We have nds32_asm_trampoline_template() to emit template pattern. */
1806 emit_block_move (m_tramp
, assemble_trampoline_template (),
1807 GEN_INT (TRAMPOLINE_SIZE
), BLOCK_OP_NORMAL
);
1809 /* After copying trampoline code into stack,
1810 fill chain_value into stack. */
1811 chain_value_mem
= adjust_address (m_tramp
, SImode
, 16);
1812 emit_move_insn (chain_value_mem
, chain_value
);
1813 /* After copying trampoline code int stack,
1814 fill nested function address into stack. */
1815 nested_func_mem
= adjust_address (m_tramp
, SImode
, 20);
1816 emit_move_insn (nested_func_mem
, fnaddr
);
1818 /* STEP 2: Sync instruction-cache. */
1820 /* We have successfully filled trampoline code into stack.
1821 However, in order to execute code in stack correctly,
1822 we must sync instruction cache. */
1823 sync_cache_addr
= XEXP (m_tramp
, 0);
1824 tmp_reg
= gen_reg_rtx (SImode
);
1825 isync_insn
= gen_unspec_volatile_isync (tmp_reg
);
1827 /* Because nds32_cache_block_size is in bytes,
1828 we get trampoline alignment in bytes for convenient comparison. */
1829 tramp_align_in_bytes
= TRAMPOLINE_ALIGNMENT
/ BITS_PER_UNIT
;
1831 if (tramp_align_in_bytes
>= nds32_cache_block_size
1832 && (tramp_align_in_bytes
% nds32_cache_block_size
) == 0)
1834 /* Under this condition, the starting address of trampoline
1835 must be aligned to the starting address of each cache block
1836 and we do not have to worry about cross-boundary issue. */
1838 i
< (TRAMPOLINE_SIZE
+ nds32_cache_block_size
- 1)
1839 / nds32_cache_block_size
;
1842 emit_move_insn (tmp_reg
,
1843 plus_constant (Pmode
, sync_cache_addr
,
1844 nds32_cache_block_size
* i
));
1845 emit_insn (isync_insn
);
1848 else if (TRAMPOLINE_SIZE
> nds32_cache_block_size
)
1850 /* The starting address of trampoline code
1851 may not be aligned to the cache block,
1852 so the trampoline code may be across two cache block.
1853 We need to sync the last element, which is 4-byte size,
1854 of trampoline template. */
1856 i
< (TRAMPOLINE_SIZE
+ nds32_cache_block_size
- 1)
1857 / nds32_cache_block_size
;
1860 emit_move_insn (tmp_reg
,
1861 plus_constant (Pmode
, sync_cache_addr
,
1862 nds32_cache_block_size
* i
));
1863 emit_insn (isync_insn
);
1866 /* The last element of trampoline template is 4-byte size. */
1867 emit_move_insn (tmp_reg
,
1868 plus_constant (Pmode
, sync_cache_addr
,
1869 TRAMPOLINE_SIZE
- 4));
1870 emit_insn (isync_insn
);
1874 /* This is the simplest case.
1875 Because TRAMPOLINE_SIZE is less than or
1876 equal to nds32_cache_block_size,
1877 we can just sync start address and
1878 the last element of trampoline code. */
1880 /* Sync starting address of tampoline code. */
1881 emit_move_insn (tmp_reg
, sync_cache_addr
);
1882 emit_insn (isync_insn
);
1883 /* Sync the last element, which is 4-byte size,
1884 of trampoline template. */
1885 emit_move_insn (tmp_reg
,
1886 plus_constant (Pmode
, sync_cache_addr
,
1887 TRAMPOLINE_SIZE
- 4));
1888 emit_insn (isync_insn
);
1891 /* Set instruction serialization barrier
1892 to guarantee the correct operations. */
1893 emit_insn (gen_unspec_volatile_isb ());
1897 /* Addressing Modes. */
1900 nds32_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1902 /* For (mem:DI addr) or (mem:DF addr) case,
1903 we only allow 'addr' to be [reg], [symbol_ref],
1904 [const], or [reg + const_int] pattern. */
1905 if (mode
== DImode
|| mode
== DFmode
)
1907 /* Allow [Reg + const_int] addressing mode. */
1908 if (GET_CODE (x
) == PLUS
)
1910 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
)
1911 && nds32_legitimate_index_p (mode
, XEXP (x
, 1), strict
)
1912 && CONST_INT_P (XEXP (x
, 1)))
1915 else if (nds32_address_register_rtx_p (XEXP (x
, 1), strict
)
1916 && nds32_legitimate_index_p (mode
, XEXP (x
, 0), strict
)
1917 && CONST_INT_P (XEXP (x
, 0)))
1921 /* Now check [reg], [symbol_ref], and [const]. */
1922 if (GET_CODE (x
) != REG
1923 && GET_CODE (x
) != SYMBOL_REF
1924 && GET_CODE (x
) != CONST
)
1928 /* Check if 'x' is a valid address. */
1929 switch (GET_CODE (x
))
1932 /* (mem (reg A)) => [Ra] */
1933 return nds32_address_register_rtx_p (x
, strict
);
1936 /* (mem (symbol_ref A)) => [symbol_ref] */
1937 /* If -mcmodel=large, the 'symbol_ref' is not a valid address
1938 during or after LRA/reload phase. */
1939 if (TARGET_CMODEL_LARGE
1940 && (reload_completed
1941 || reload_in_progress
1942 || lra_in_progress
))
1944 /* If -mcmodel=medium and the symbol references to rodata section,
1945 the 'symbol_ref' is not a valid address during or after
1946 LRA/reload phase. */
1947 if (TARGET_CMODEL_MEDIUM
1948 && NDS32_SYMBOL_REF_RODATA_P (x
)
1949 && (reload_completed
1950 || reload_in_progress
1951 || lra_in_progress
))
1957 /* (mem (const (...)))
1958 => [ + const_addr ], where const_addr = symbol_ref + const_int */
1959 if (GET_CODE (XEXP (x
, 0)) == PLUS
)
1961 rtx plus_op
= XEXP (x
, 0);
1963 rtx op0
= XEXP (plus_op
, 0);
1964 rtx op1
= XEXP (plus_op
, 1);
1966 if (GET_CODE (op0
) == SYMBOL_REF
&& CONST_INT_P (op1
))
1968 /* Now we see the [ + const_addr ] pattern, but we need
1969 some further checking. */
1970 /* If -mcmodel=large, the 'const_addr' is not a valid address
1971 during or after LRA/reload phase. */
1972 if (TARGET_CMODEL_LARGE
1973 && (reload_completed
1974 || reload_in_progress
1975 || lra_in_progress
))
1977 /* If -mcmodel=medium and the symbol references to rodata section,
1978 the 'const_addr' is not a valid address during or after
1979 LRA/reload phase. */
1980 if (TARGET_CMODEL_MEDIUM
1981 && NDS32_SYMBOL_REF_RODATA_P (op0
)
1982 && (reload_completed
1983 || reload_in_progress
1984 || lra_in_progress
))
1987 /* At this point we can make sure 'const_addr' is a
1996 /* (mem (post_modify (reg) (plus (reg) (reg))))
1998 /* (mem (post_modify (reg) (plus (reg) (const_int))))
1999 => [Ra], const_int */
2000 if (GET_CODE (XEXP (x
, 0)) == REG
2001 && GET_CODE (XEXP (x
, 1)) == PLUS
)
2003 rtx plus_op
= XEXP (x
, 1);
2005 rtx op0
= XEXP (plus_op
, 0);
2006 rtx op1
= XEXP (plus_op
, 1);
2008 if (nds32_address_register_rtx_p (op0
, strict
)
2009 && nds32_legitimate_index_p (mode
, op1
, strict
))
2019 /* (mem (post_inc reg)) => [Ra], 1/2/4 */
2020 /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
2021 /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2022 We only need to deal with register Ra. */
2023 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
))
2029 /* (mem (plus reg const_int))
2031 /* (mem (plus reg reg))
2033 /* (mem (plus (mult reg const_int) reg))
2034 => [Ra + Rb << sv] */
2035 if (nds32_address_register_rtx_p (XEXP (x
, 0), strict
)
2036 && nds32_legitimate_index_p (mode
, XEXP (x
, 1), strict
))
2038 else if (nds32_address_register_rtx_p (XEXP (x
, 1), strict
)
2039 && nds32_legitimate_index_p (mode
, XEXP (x
, 0), strict
))
2045 /* (mem (lo_sum (reg) (symbol_ref))) */
2046 /* (mem (lo_sum (reg) (const))) */
2047 gcc_assert (REG_P (XEXP (x
, 0)));
2048 if (GET_CODE (XEXP (x
, 1)) == SYMBOL_REF
2049 || GET_CODE (XEXP (x
, 1)) == CONST
)
2050 return nds32_legitimate_address_p (mode
, XEXP (x
, 1), strict
);
2060 /* Describing Relative Costs of Operations. */
2063 nds32_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2067 if (from
== HIGH_REGS
|| to
== HIGH_REGS
)
2074 nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2075 reg_class_t rclass ATTRIBUTE_UNUSED
,
2076 bool in ATTRIBUTE_UNUSED
)
2081 /* This target hook describes the relative costs of RTL expressions.
2082 Return 'true' when all subexpressions of x have been processed.
2083 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
2084 Refer to gcc/rtlanal.c for more information. */
2086 nds32_rtx_costs (rtx x
,
2093 return nds32_rtx_costs_impl (x
, mode
, outer_code
, opno
, total
, speed
);
2097 nds32_address_cost (rtx address
,
2102 return nds32_address_cost_impl (address
, mode
, as
, speed
);
2106 /* Dividing the Output into Sections (Texts, Data, . . . ). */
2108 /* If references to a symbol or a constant must be treated differently
2109 depending on something about the variable or function named by the symbol
2110 (such as what section it is in), we use this hook to store flags
2111 in symbol_ref rtx. */
2113 nds32_encode_section_info (tree decl
, rtx rtl
, int new_decl_p
)
2115 default_encode_section_info (decl
, rtl
, new_decl_p
);
2117 /* For the memory rtx, if it references to rodata section, we can store
2118 NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
2119 nds32_legitimate_address_p() can determine how to treat such symbol_ref
2120 based on -mcmodel=X and this information. */
2121 if (MEM_P (rtl
) && MEM_READONLY_P (rtl
))
2123 rtx addr
= XEXP (rtl
, 0);
2125 if (GET_CODE (addr
) == SYMBOL_REF
)
2127 /* For (mem (symbol_ref X)) case. */
2128 SYMBOL_REF_FLAGS (addr
) |= NDS32_SYMBOL_FLAG_RODATA
;
2130 else if (GET_CODE (addr
) == CONST
2131 && GET_CODE (XEXP (addr
, 0)) == PLUS
)
2133 /* For (mem (const (plus (symbol_ref X) (const_int N)))) case. */
2134 rtx plus_op
= XEXP (addr
, 0);
2135 rtx op0
= XEXP (plus_op
, 0);
2136 rtx op1
= XEXP (plus_op
, 1);
2138 if (GET_CODE (op0
) == SYMBOL_REF
&& CONST_INT_P (op1
))
2139 SYMBOL_REF_FLAGS (op0
) |= NDS32_SYMBOL_FLAG_RODATA
;
2145 /* Defining the Output Assembler Language. */
2147 /* -- The Overall Framework of an Assembler File. */
2150 nds32_asm_file_start (void)
2152 default_file_start ();
2154 /* Tell assembler which ABI we are using. */
2155 fprintf (asm_out_file
, "\t! ABI version\n");
2156 fprintf (asm_out_file
, "\t.abi_2\n");
2158 /* Tell assembler that this asm code is generated by compiler. */
2159 fprintf (asm_out_file
, "\t! This asm file is generated by compiler\n");
2160 fprintf (asm_out_file
, "\t.flag\tverbatim\n");
2161 /* Give assembler the size of each vector for interrupt handler. */
2162 fprintf (asm_out_file
, "\t! This vector size directive is required "
2163 "for checking inconsistency on interrupt handler\n");
2164 fprintf (asm_out_file
, "\t.vec_size\t%d\n", nds32_isr_vector_size
);
2166 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2169 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V2");
2171 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V3");
2173 fprintf (asm_out_file
, "\t! ISA family\t\t: %s\n", "V3M");
2175 if (TARGET_CMODEL_SMALL
)
2176 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "SMALL");
2177 if (TARGET_CMODEL_MEDIUM
)
2178 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "MEDIUM");
2179 if (TARGET_CMODEL_LARGE
)
2180 fprintf (asm_out_file
, "\t! Code model\t\t: %s\n", "LARGE");
2182 fprintf (asm_out_file
, "\t! Endian setting\t: %s\n",
2183 ((TARGET_BIG_ENDIAN
) ? "big-endian"
2184 : "little-endian"));
2186 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2188 fprintf (asm_out_file
, "\t! Use conditional move\t\t: %s\n",
2189 ((TARGET_CMOV
) ? "Yes"
2191 fprintf (asm_out_file
, "\t! Use performance extension\t: %s\n",
2192 ((TARGET_PERF_EXT
) ? "Yes"
2195 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2197 fprintf (asm_out_file
, "\t! V3PUSH instructions\t: %s\n",
2198 ((TARGET_V3PUSH
) ? "Yes"
2200 fprintf (asm_out_file
, "\t! 16-bit instructions\t: %s\n",
2201 ((TARGET_16_BIT
) ? "Yes"
2203 fprintf (asm_out_file
, "\t! Reduced registers set\t: %s\n",
2204 ((TARGET_REDUCED_REGS
) ? "Yes"
2207 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2210 fprintf (asm_out_file
, "\t! Optimization level\t: -Os\n");
2212 fprintf (asm_out_file
, "\t! Optimization level\t: -O%d\n", optimize
);
2214 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2216 fprintf (asm_out_file
, "\t! Cache block size\t: %d\n",
2217 nds32_cache_block_size
);
2219 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2221 nds32_asm_file_start_for_isr ();
2225 nds32_asm_file_end (void)
2227 nds32_asm_file_end_for_isr ();
2229 fprintf (asm_out_file
, "\t! ------------------------------------\n");
2232 /* -- Output and Generation of Labels. */
/* Implement TARGET_ASM_GLOBALIZE_LABEL.  Emit a '.global NAME' directive.  */
static void
nds32_asm_globalize_label (FILE *stream, const char *name)
{
  fputs ("\t.global\t", stream);
  assemble_name (stream, name);
  fputs ("\n", stream);
}
2242 /* -- Output of Assembler Instructions. */
2245 nds32_print_operand (FILE *stream
, rtx x
, int code
)
2252 /* Do nothing special. */
2256 /* 'x' is supposed to be CONST_INT, get the value. */
2257 gcc_assert (CONST_INT_P (x
));
2258 op_value
= INTVAL (x
);
2260 /* According to the Andes architecture,
2261 the system/user register index range is 0 ~ 1023.
2262 In order to avoid conflict between user-specified-integer value
2263 and enum-specified-register value,
2264 the 'enum nds32_intrinsic_registers' value
2265 in nds32_intrinsic.h starts from 1024. */
2266 if (op_value
< 1024 && op_value
>= 0)
2268 /* If user gives integer value directly (0~1023),
2269 we just print out the value. */
2270 fprintf (stream
, "%d", op_value
);
2272 else if (op_value
< 0
2273 || op_value
>= ((int) ARRAY_SIZE (nds32_intrinsic_register_names
)
2276 /* The enum index value for array size is out of range. */
2277 error ("intrinsic register index is out of range");
2281 /* If user applies normal way with __NDS32_REG_XXX__ enum data,
2282 we can print out register name. Remember to substract 1024. */
2283 fprintf (stream
, "%s",
2284 nds32_intrinsic_register_names
[op_value
- 1024]);
2287 /* No need to handle following process, so return immediately. */
2292 output_operand_lossage ("invalid operand output code");
2296 switch (GET_CODE (x
))
2300 output_addr_const (stream
, x
);
2304 /* Forbid using static chain register ($r16)
2305 on reduced-set registers configuration. */
2306 if (TARGET_REDUCED_REGS
2307 && REGNO (x
) == STATIC_CHAIN_REGNUM
)
2308 sorry ("a nested function is not supported for reduced registers");
2310 /* Normal cases, print out register name. */
2311 fputs (reg_names
[REGNO (x
)], stream
);
2315 output_address (GET_MODE (x
), XEXP (x
, 0));
2321 output_addr_const (stream
, x
);
2325 /* Generally, output_addr_const () is able to handle most cases.
2326 We want to see what CODE could appear,
2327 so we use gcc_unreachable() to stop it. */
2335 nds32_print_operand_address (FILE *stream
, machine_mode
/*mode*/, rtx x
)
2339 switch (GET_CODE (x
))
2343 /* [ + symbol_ref] */
2344 /* [ + const_addr], where const_addr = symbol_ref + const_int */
2345 fputs ("[ + ", stream
);
2346 output_addr_const (stream
, x
);
2347 fputs ("]", stream
);
2351 /* Forbid using static chain register ($r16)
2352 on reduced-set registers configuration. */
2353 if (TARGET_REDUCED_REGS
2354 && REGNO (x
) == STATIC_CHAIN_REGNUM
)
2355 sorry ("a nested function is not supported for reduced registers");
2358 fprintf (stream
, "[%s]", reg_names
[REGNO (x
)]);
2365 /* Checking op0, forbid using static chain register ($r16)
2366 on reduced-set registers configuration. */
2367 if (TARGET_REDUCED_REGS
2369 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2370 sorry ("a nested function is not supported for reduced registers");
2371 /* Checking op1, forbid using static chain register ($r16)
2372 on reduced-set registers configuration. */
2373 if (TARGET_REDUCED_REGS
2375 && REGNO (op1
) == STATIC_CHAIN_REGNUM
)
2376 sorry ("a nested function is not supported for reduced registers");
2378 if (REG_P (op0
) && CONST_INT_P (op1
))
2381 fprintf (stream
, "[%s + (%d)]",
2382 reg_names
[REGNO (op0
)], (int)INTVAL (op1
));
2384 else if (REG_P (op0
) && REG_P (op1
))
2387 fprintf (stream
, "[%s + %s]",
2388 reg_names
[REGNO (op0
)], reg_names
[REGNO (op1
)]);
2390 else if (GET_CODE (op0
) == MULT
&& REG_P (op1
))
2393 From observation, the pattern looks like:
2394 (plus:SI (mult:SI (reg:SI 58)
2395 (const_int 4 [0x4]))
2399 /* We need to set sv to output shift value. */
2400 if (INTVAL (XEXP (op0
, 1)) == 1)
2402 else if (INTVAL (XEXP (op0
, 1)) == 2)
2404 else if (INTVAL (XEXP (op0
, 1)) == 4)
2409 fprintf (stream
, "[%s + %s << %d]",
2410 reg_names
[REGNO (op1
)],
2411 reg_names
[REGNO (XEXP (op0
, 0))],
2416 /* The control flow is not supposed to be here. */
2424 /* (post_modify (regA) (plus (regA) (regB)))
2425 (post_modify (regA) (plus (regA) (const_int)))
2426 We would like to extract
2427 regA and regB (or const_int) from plus rtx. */
2428 op0
= XEXP (XEXP (x
, 1), 0);
2429 op1
= XEXP (XEXP (x
, 1), 1);
2431 /* Checking op0, forbid using static chain register ($r16)
2432 on reduced-set registers configuration. */
2433 if (TARGET_REDUCED_REGS
2435 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2436 sorry ("a nested function is not supported for reduced registers");
2437 /* Checking op1, forbid using static chain register ($r16)
2438 on reduced-set registers configuration. */
2439 if (TARGET_REDUCED_REGS
2441 && REGNO (op1
) == STATIC_CHAIN_REGNUM
)
2442 sorry ("a nested function is not supported for reduced registers");
2444 if (REG_P (op0
) && REG_P (op1
))
2447 fprintf (stream
, "[%s], %s",
2448 reg_names
[REGNO (op0
)], reg_names
[REGNO (op1
)]);
2450 else if (REG_P (op0
) && CONST_INT_P (op1
))
2453 fprintf (stream
, "[%s], %d",
2454 reg_names
[REGNO (op0
)], (int)INTVAL (op1
));
2458 /* The control flow is not supposed to be here. */
2469 /* Checking op0, forbid using static chain register ($r16)
2470 on reduced-set registers configuration. */
2471 if (TARGET_REDUCED_REGS
2473 && REGNO (op0
) == STATIC_CHAIN_REGNUM
)
2474 sorry ("a nested function is not supported for reduced registers");
2478 /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
2479 The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2480 We only need to deal with register Ra. */
2481 fprintf (stream
, "[%s]", reg_names
[REGNO (op0
)]);
2485 /* The control flow is not supposed to be here. */
2493 /* Generally, output_addr_const () is able to handle most cases.
2494 We want to see what CODE could appear,
2495 so we use gcc_unreachable() to stop it. */
2503 /* Defining target-specific uses of __attribute__. */
2505 /* Add some checking after merging attributes. */
2507 nds32_merge_decl_attributes (tree olddecl
, tree newdecl
)
2509 tree combined_attrs
;
2511 /* Create combined attributes. */
2512 combined_attrs
= merge_attributes (DECL_ATTRIBUTES (olddecl
),
2513 DECL_ATTRIBUTES (newdecl
));
 2515 /* Since newdecl is actually a duplicate of olddecl,
2516 we can take olddecl for some operations. */
2517 if (TREE_CODE (olddecl
) == FUNCTION_DECL
)
2519 /* Check isr-specific attributes conflict. */
2520 nds32_check_isr_attrs_conflict (olddecl
, combined_attrs
);
2523 return combined_attrs
;
2526 /* Add some checking when inserting attributes. */
2528 nds32_insert_attributes (tree decl
, tree
*attributes
)
2530 /* For function declaration, we need to check isr-specific attributes:
2531 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
2532 2. Check valid integer value for interrupt/exception.
2533 3. Check valid integer value for reset.
2534 4. Check valid function for nmi/warm. */
2535 if (TREE_CODE (decl
) == FUNCTION_DECL
)
2538 tree intr
, excp
, reset
;
2540 /* Pick up function attributes. */
2541 func_attrs
= *attributes
;
2543 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
2544 nds32_check_isr_attrs_conflict (decl
, func_attrs
);
2546 /* Now we are starting to check valid id value
2547 for interrupt/exception/reset.
2548 Note that we ONLY check its validity here.
2549 To construct isr vector information, it is still performed
2550 by nds32_construct_isr_vectors_information(). */
2551 intr
= lookup_attribute ("interrupt", func_attrs
);
2552 excp
= lookup_attribute ("exception", func_attrs
);
2553 reset
= lookup_attribute ("reset", func_attrs
);
2557 /* Deal with interrupt/exception. */
2559 unsigned int lower_bound
, upper_bound
;
2561 /* The way to handle interrupt or exception is the same,
2562 we just need to take care of actual vector number.
2563 For interrupt(0..63), the actual vector number is (9..72).
2564 For exception(1..8), the actual vector number is (1..8). */
2565 lower_bound
= (intr
) ? (0) : (1);
2566 upper_bound
= (intr
) ? (63) : (8);
2568 /* Prepare id list so that we can traverse id value. */
2569 id_list
= (intr
) ? (TREE_VALUE (intr
)) : (TREE_VALUE (excp
));
2571 /* 2. Check valid integer value for interrupt/exception. */
2576 /* Pick up each vector id value. */
2577 id
= TREE_VALUE (id_list
);
2578 /* Issue error if it is not a valid integer value. */
2579 if (TREE_CODE (id
) != INTEGER_CST
2580 || wi::ltu_p (id
, lower_bound
)
2581 || wi::gtu_p (id
, upper_bound
))
2582 error ("invalid id value for interrupt/exception attribute");
2584 /* Advance to next id. */
2585 id_list
= TREE_CHAIN (id_list
);
2590 /* Deal with reset. */
2594 unsigned int lower_bound
;
2595 unsigned int upper_bound
;
2597 /* Prepare id_list and identify id value so that
2598 we can check if total number of vectors is valid. */
2599 id_list
= TREE_VALUE (reset
);
2600 id
= TREE_VALUE (id_list
);
2602 /* The maximum numbers for user's interrupt is 64. */
2606 /* 3. Check valid integer value for reset. */
2607 if (TREE_CODE (id
) != INTEGER_CST
2608 || wi::ltu_p (id
, lower_bound
)
2609 || wi::gtu_p (id
, upper_bound
))
2610 error ("invalid id value for reset attribute");
2612 /* 4. Check valid function for nmi/warm. */
2613 nmi
= lookup_attribute ("nmi", func_attrs
);
2614 warm
= lookup_attribute ("warm", func_attrs
);
2616 if (nmi
!= NULL_TREE
)
2621 nmi_func_list
= TREE_VALUE (nmi
);
2622 nmi_func
= TREE_VALUE (nmi_func_list
);
2624 /* Issue error if it is not a valid nmi function. */
2625 if (TREE_CODE (nmi_func
) != IDENTIFIER_NODE
)
2626 error ("invalid nmi function for reset attribute");
2629 if (warm
!= NULL_TREE
)
2631 tree warm_func_list
;
2634 warm_func_list
= TREE_VALUE (warm
);
2635 warm_func
= TREE_VALUE (warm_func_list
);
2637 /* Issue error if it is not a valid warm function. */
2638 if (TREE_CODE (warm_func
) != IDENTIFIER_NODE
)
2639 error ("invalid warm function for reset attribute");
2644 /* No interrupt, exception, or reset attribute is set. */
2651 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED
,
2652 tree pop_target ATTRIBUTE_UNUSED
)
2654 /* Currently, we do not parse any pragma target by ourself,
2655 so just simply return false. */
2660 nds32_option_override (void)
2662 /* After all the command options have been parsed,
2663 we shall deal with some flags for changing compiler settings. */
2665 /* At first, we check if we have to strictly
2666 set some flags based on ISA family. */
2669 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2670 target_flags
&= ~MASK_V3PUSH
;
2674 /* Under V3 ISA, currently nothing should be strictly set. */
2678 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2679 target_flags
|= MASK_REDUCED_REGS
;
2680 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
2681 target_flags
&= ~MASK_PERF_EXT
;
2684 /* See if we are using reduced-set registers:
2685 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2686 If so, we must forbid using $r11~$r14, $r16~$r27. */
2687 if (TARGET_REDUCED_REGS
)
2691 /* Prevent register allocator from
2692 choosing it as doing register allocation. */
2693 for (r
= 11; r
<= 14; r
++)
2694 fixed_regs
[r
] = call_used_regs
[r
] = 1;
2695 for (r
= 16; r
<= 27; r
++)
2696 fixed_regs
[r
] = call_used_regs
[r
] = 1;
2701 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
2702 target_flags
&= ~MASK_V3PUSH
;
2705 /* Currently, we don't support PIC code generation yet. */
2707 sorry ("position-independent code not supported");
2711 /* Miscellaneous Parameters. */
2714 nds32_init_builtins (void)
2716 nds32_init_builtins_impl ();
2720 nds32_expand_builtin (tree exp
,
2726 return nds32_expand_builtin_impl (exp
, target
, subtarget
, mode
, ignore
);
2730 /* ------------------------------------------------------------------------ */
 2732 /* PART 4: Implement extern function definitions,
2733 the prototype is in nds32-protos.h. */
2735 /* Defining Data Structures for Per-function Information. */
2738 nds32_init_expanders (void)
2740 /* Arrange to initialize and mark the machine per-function status. */
2741 init_machine_status
= nds32_init_machine_status
;
2745 /* Register Usage. */
2747 /* -- How Values Fit in Registers. */
2750 nds32_hard_regno_nregs (int regno ATTRIBUTE_UNUSED
,
2753 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
2756 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2759 nds32_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
2761 /* Restrict double-word quantities to even register pairs. */
2762 if (HARD_REGNO_NREGS (regno
, mode
) == 1
2769 #undef TARGET_HARD_REGNO_MODE_OK
2770 #define TARGET_HARD_REGNO_MODE_OK nds32_hard_regno_mode_ok
2772 /* Register Classes. */
2775 nds32_regno_reg_class (int regno
)
2777 /* Refer to nds32.h for more register class details. */
2779 if (regno
>= 0 && regno
<= 7)
2781 else if (regno
>= 8 && regno
<= 11)
2783 else if (regno
>= 12 && regno
<= 14)
2785 else if (regno
== 15)
2787 else if (regno
>= 16 && regno
<= 19)
2789 else if (regno
>= 20 && regno
<= 31)
2791 else if (regno
== 32 || regno
== 33)
2798 /* Stack Layout and Calling Conventions. */
2800 /* -- Basic Stack Layout. */
2803 nds32_return_addr_rtx (int count
,
2804 rtx frameaddr ATTRIBUTE_UNUSED
)
2806 /* There is no way to determine the return address
2807 if frameaddr is the frame that has 'count' steps
2808 up from current frame. */
2812 /* If count == 0, it means we are at current frame,
2813 the return address is $r30 ($lp). */
2814 return get_hard_reg_initial_val (Pmode
, LP_REGNUM
);
2817 /* -- Eliminating Frame Pointer and Arg Pointer. */
2820 nds32_initial_elimination_offset (unsigned int from_reg
, unsigned int to_reg
)
2822 HOST_WIDE_INT offset
;
2824 /* Compute and setup stack frame size.
2825 The result will be in cfun->machine. */
2826 nds32_compute_stack_frame ();
2828 /* Remember to consider
2829 cfun->machine->callee_saved_area_padding_bytes
2830 when calculating offset. */
2831 if (from_reg
== ARG_POINTER_REGNUM
&& to_reg
== STACK_POINTER_REGNUM
)
2833 offset
= (cfun
->machine
->fp_size
2834 + cfun
->machine
->gp_size
2835 + cfun
->machine
->lp_size
2836 + cfun
->machine
->callee_saved_gpr_regs_size
2837 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
2838 + cfun
->machine
->local_size
2839 + cfun
->machine
->out_args_size
);
2841 else if (from_reg
== ARG_POINTER_REGNUM
2842 && to_reg
== HARD_FRAME_POINTER_REGNUM
)
2846 else if (from_reg
== FRAME_POINTER_REGNUM
2847 && to_reg
== STACK_POINTER_REGNUM
)
2849 offset
= (cfun
->machine
->local_size
+ cfun
->machine
->out_args_size
);
2851 else if (from_reg
== FRAME_POINTER_REGNUM
2852 && to_reg
== HARD_FRAME_POINTER_REGNUM
)
2854 offset
= (-1) * (cfun
->machine
->fp_size
2855 + cfun
->machine
->gp_size
2856 + cfun
->machine
->lp_size
2857 + cfun
->machine
->callee_saved_gpr_regs_size
2858 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
);
2868 /* -- Passing Arguments in Registers. */
2871 nds32_init_cumulative_args (CUMULATIVE_ARGS
*cum
,
2872 tree fntype ATTRIBUTE_UNUSED
,
2873 rtx libname ATTRIBUTE_UNUSED
,
2874 tree fndecl ATTRIBUTE_UNUSED
,
2875 int n_named_args ATTRIBUTE_UNUSED
)
2877 /* Initial available registers
2878 (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
2879 for passing arguments. */
2880 cum
->gpr_offset
= 0;
2883 /* -- Function Entry and Exit. */
2885 /* Function for normal multiple push prologue. */
2887 nds32_expand_prologue (void)
2894 rtx fp_adjust_insn
, sp_adjust_insn
;
2896 /* Compute and setup stack frame size.
2897 The result will be in cfun->machine. */
2898 nds32_compute_stack_frame ();
2900 /* If this is a variadic function, first we need to push argument
2901 registers that hold the unnamed argument value. */
2902 if (cfun
->machine
->va_args_size
!= 0)
2904 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->va_args_first_regno
);
2905 Re
= gen_rtx_REG (SImode
, cfun
->machine
->va_args_last_regno
);
2906 /* No need to push $fp, $gp, or $lp, so use GEN_INT(0). */
2907 nds32_emit_stack_push_multiple (Rb
, Re
, GEN_INT (0), true);
2909 /* We may also need to adjust stack pointer for padding bytes
2910 because varargs may cause $sp not 8-byte aligned. */
2911 if (cfun
->machine
->va_args_area_padding_bytes
)
2913 /* Generate sp adjustment instruction. */
2914 sp_adjust
= cfun
->machine
->va_args_area_padding_bytes
;
2915 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
2917 GEN_INT (-1 * sp_adjust
));
2919 /* Emit rtx into instructions list and receive INSN rtx form. */
2920 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
2922 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2923 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2924 generate CFI (Call Frame Information) stuff. */
2925 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
2929 /* If the function is 'naked',
2930 we do not have to generate prologue code fragment. */
2931 if (cfun
->machine
->naked_p
)
2934 /* Get callee_first_regno and callee_last_regno. */
2935 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
2936 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
2938 /* nds32_emit_stack_push_multiple(first_regno, last_regno),
2939 the pattern 'stack_push_multiple' is implemented in nds32.md.
2940 For En4 field, we have to calculate its constant value.
2941 Refer to Andes ISA for more information. */
2943 if (cfun
->machine
->fp_size
)
2945 if (cfun
->machine
->gp_size
)
2947 if (cfun
->machine
->lp_size
)
2950 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2951 to be saved, we don't have to create multiple push instruction.
2952 Otherwise, a multiple push instruction is needed. */
2953 if (!(REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
&& en4_const
== 0))
2955 /* Create multiple push instruction rtx. */
2956 nds32_emit_stack_push_multiple (Rb
, Re
, GEN_INT (en4_const
), false);
2959 /* Check frame_pointer_needed to see
2960 if we shall emit fp adjustment instruction. */
2961 if (frame_pointer_needed
)
2963 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2964 + (4 * callee-saved-registers)
2965 Note: No need to adjust
2966 cfun->machine->callee_saved_area_padding_bytes,
2967 because, at this point, stack pointer is just
2968 at the position after push instruction. */
2969 fp_adjust
= cfun
->machine
->fp_size
2970 + cfun
->machine
->gp_size
2971 + cfun
->machine
->lp_size
2972 + cfun
->machine
->callee_saved_gpr_regs_size
;
2973 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
2975 GEN_INT (fp_adjust
));
2976 /* Emit rtx into instructions list and receive INSN rtx form. */
2977 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
2979 /* The insn rtx 'fp_adjust_insn' will change frame layout. */
2980 RTX_FRAME_RELATED_P (fp_adjust_insn
) = 1;
2983 /* Adjust $sp = $sp - local_size - out_args_size
2984 - callee_saved_area_padding_bytes. */
2985 sp_adjust
= cfun
->machine
->local_size
2986 + cfun
->machine
->out_args_size
2987 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
2988 /* sp_adjust value may be out of range of the addi instruction,
2989 create alternative add behavior with TA_REGNUM if necessary,
2990 using NEGATIVE value to tell that we are decreasing address. */
2991 sp_adjust
= nds32_force_addi_stack_int ( (-1) * sp_adjust
);
2994 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
2995 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
2997 GEN_INT (-1 * sp_adjust
));
2998 /* Emit rtx into instructions list and receive INSN rtx form. */
2999 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3001 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3002 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3003 generate CFI (Call Frame Information) stuff. */
3004 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3007 /* Prevent the instruction scheduler from
3008 moving instructions across the boundary. */
3009 emit_insn (gen_blockage ());
3012 /* Function for normal multiple pop epilogue. */
3014 nds32_expand_epilogue (bool sibcall_p
)
3022 /* Compute and setup stack frame size.
3023 The result will be in cfun->machine. */
3024 nds32_compute_stack_frame ();
3026 /* Prevent the instruction scheduler from
3027 moving instructions across the boundary. */
3028 emit_insn (gen_blockage ());
3030 /* If the function is 'naked', we do not have to generate
3031 epilogue code fragment BUT 'ret' instruction.
3032 However, if this function is also a variadic function,
3033 we need to create adjust stack pointer before 'ret' instruction. */
3034 if (cfun
->machine
->naked_p
)
3036 /* If this is a variadic function, we do not have to restore argument
3037 registers but need to adjust stack pointer back to previous stack
3038 frame location before return. */
3039 if (cfun
->machine
->va_args_size
!= 0)
3041 /* Generate sp adjustment instruction.
3042 We need to consider padding bytes here. */
3043 sp_adjust
= cfun
->machine
->va_args_size
3044 + cfun
->machine
->va_args_area_padding_bytes
;
3045 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3047 GEN_INT (sp_adjust
));
3048 /* Emit rtx into instructions list and receive INSN rtx form. */
3049 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3051 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3052 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3053 generate CFI (Call Frame Information) stuff. */
3054 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3057 /* Generate return instruction by using 'return_internal' pattern.
3058 Make sure this instruction is after gen_blockage(). */
3060 emit_jump_insn (gen_return_internal ());
3064 if (frame_pointer_needed
)
3066 /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
3067 - (4 * callee-saved-registers)
3068 Note: No need to adjust
3069 cfun->machine->callee_saved_area_padding_bytes,
3070 because we want to adjust stack pointer
3071 to the position for pop instruction. */
3072 sp_adjust
= cfun
->machine
->fp_size
3073 + cfun
->machine
->gp_size
3074 + cfun
->machine
->lp_size
3075 + cfun
->machine
->callee_saved_gpr_regs_size
;
3076 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3077 hard_frame_pointer_rtx
,
3078 GEN_INT (-1 * sp_adjust
));
3079 /* Emit rtx into instructions list and receive INSN rtx form. */
3080 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3082 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3083 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3087 /* If frame pointer is NOT needed,
3088 we cannot calculate the sp adjustment from frame pointer.
3089 Instead, we calculate the adjustment by local_size,
3090 out_args_size, and callee_saved_area_padding_bytes.
3091 Notice that such sp adjustment value may be out of range,
3092 so we have to deal with it as well. */
3094 /* Adjust $sp = $sp + local_size + out_args_size
3095 + callee_saved_area_padding_bytes. */
3096 sp_adjust
= cfun
->machine
->local_size
3097 + cfun
->machine
->out_args_size
3098 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3099 /* sp_adjust value may be out of range of the addi instruction,
3100 create alternative add behavior with TA_REGNUM if necessary,
3101 using POSITIVE value to tell that we are increasing address. */
3102 sp_adjust
= nds32_force_addi_stack_int (sp_adjust
);
3105 /* Generate sp adjustment instruction
3106 if and only if sp_adjust != 0. */
3107 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3109 GEN_INT (sp_adjust
));
3110 /* Emit rtx into instructions list and receive INSN rtx form. */
3111 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3113 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3114 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3118 /* Get callee_first_regno and callee_last_regno. */
3119 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3120 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3122 /* nds32_emit_stack_pop_multiple(first_regno, last_regno),
 3123 the pattern 'stack_pop_multiple' is implemented in nds32.md.
3124 For En4 field, we have to calculate its constant value.
3125 Refer to Andes ISA for more information. */
3127 if (cfun
->machine
->fp_size
)
3129 if (cfun
->machine
->gp_size
)
3131 if (cfun
->machine
->lp_size
)
3134 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
3135 to be saved, we don't have to create multiple pop instruction.
3136 Otherwise, a multiple pop instruction is needed. */
3137 if (!(REGNO (Rb
) == SP_REGNUM
&& REGNO (Re
) == SP_REGNUM
&& en4_const
== 0))
3139 /* Create multiple pop instruction rtx. */
3140 nds32_emit_stack_pop_multiple (Rb
, Re
, GEN_INT (en4_const
));
3143 /* If this is a variadic function, we do not have to restore argument
3144 registers but need to adjust stack pointer back to previous stack
3145 frame location before return. */
3146 if (cfun
->machine
->va_args_size
!= 0)
3148 /* Generate sp adjustment instruction.
3149 We need to consider padding bytes here. */
3150 sp_adjust
= cfun
->machine
->va_args_size
3151 + cfun
->machine
->va_args_area_padding_bytes
;
3152 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3154 GEN_INT (sp_adjust
));
3155 /* Emit rtx into instructions list and receive INSN rtx form. */
3156 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3158 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3159 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3160 generate CFI (Call Frame Information) stuff. */
3161 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3164 /* Generate return instruction. */
3166 emit_jump_insn (gen_return_internal ());
3169 /* Function for v3push prologue. */
3171 nds32_expand_prologue_v3push (void)
3177 rtx fp_adjust_insn
, sp_adjust_insn
;
3179 /* Compute and setup stack frame size.
3180 The result will be in cfun->machine. */
3181 nds32_compute_stack_frame ();
3183 /* If the function is 'naked',
3184 we do not have to generate prologue code fragment. */
3185 if (cfun
->machine
->naked_p
)
3188 /* Get callee_first_regno and callee_last_regno. */
3189 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3190 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3192 /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
3193 where imm8u has to be 8-byte alignment. */
3194 sp_adjust
= cfun
->machine
->local_size
3195 + cfun
->machine
->out_args_size
3196 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3198 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust
))
3199 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust
))
3201 /* We can use 'push25 Re,imm8u'. */
3203 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3204 the pattern 'stack_v3push' is implemented in nds32.md.
3205 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3206 nds32_emit_stack_v3push (Rb
, Re
,
3207 GEN_INT (14), GEN_INT (sp_adjust
));
3209 /* Check frame_pointer_needed to see
3210 if we shall emit fp adjustment instruction. */
3211 if (frame_pointer_needed
)
3213 /* adjust $fp = $sp + 4 ($fp size)
3216 + (4 * n) (callee-saved registers)
3217 + sp_adjust ('push25 Re,imm8u')
3218 Note: Since we use 'push25 Re,imm8u',
3219 the position of stack pointer is further
3220 changed after push instruction.
3221 Hence, we need to take sp_adjust value
3222 into consideration. */
3223 fp_adjust
= cfun
->machine
->fp_size
3224 + cfun
->machine
->gp_size
3225 + cfun
->machine
->lp_size
3226 + cfun
->machine
->callee_saved_gpr_regs_size
3228 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
3230 GEN_INT (fp_adjust
));
3231 /* Emit rtx into instructions list and receive INSN rtx form. */
3232 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
3237 /* We have to use 'push25 Re,0' and
3238 expand one more instruction to adjust $sp later. */
3240 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3241 the pattern 'stack_v3push' is implemented in nds32.md.
3242 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3243 nds32_emit_stack_v3push (Rb
, Re
,
3244 GEN_INT (14), GEN_INT (0));
3246 /* Check frame_pointer_needed to see
3247 if we shall emit fp adjustment instruction. */
3248 if (frame_pointer_needed
)
3250 /* adjust $fp = $sp + 4 ($fp size)
3253 + (4 * n) (callee-saved registers)
3254 Note: Since we use 'push25 Re,0',
3255 the stack pointer is just at the position
3256 after push instruction.
3257 No need to take sp_adjust into consideration. */
3258 fp_adjust
= cfun
->machine
->fp_size
3259 + cfun
->machine
->gp_size
3260 + cfun
->machine
->lp_size
3261 + cfun
->machine
->callee_saved_gpr_regs_size
;
3262 fp_adjust_insn
= gen_addsi3 (hard_frame_pointer_rtx
,
3264 GEN_INT (fp_adjust
));
3265 /* Emit rtx into instructions list and receive INSN rtx form. */
3266 fp_adjust_insn
= emit_insn (fp_adjust_insn
);
3269 /* Because we use 'push25 Re,0',
3270 we need to expand one more instruction to adjust $sp.
3271 However, sp_adjust value may be out of range of the addi instruction,
3272 create alternative add behavior with TA_REGNUM if necessary,
3273 using NEGATIVE value to tell that we are decreasing address. */
3274 sp_adjust
= nds32_force_addi_stack_int ( (-1) * sp_adjust
);
3277 /* Generate sp adjustment instruction
3278 if and only if sp_adjust != 0. */
3279 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3281 GEN_INT (-1 * sp_adjust
));
3282 /* Emit rtx into instructions list and receive INSN rtx form. */
3283 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3285 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3286 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3287 generate CFI (Call Frame Information) stuff. */
3288 RTX_FRAME_RELATED_P (sp_adjust_insn
) = 1;
3292 /* Prevent the instruction scheduler from
3293 moving instructions across the boundary. */
3294 emit_insn (gen_blockage ());
3297 /* Function for v3pop epilogue. */
3299 nds32_expand_epilogue_v3pop (bool sibcall_p
)
3306 /* Compute and setup stack frame size.
3307 The result will be in cfun->machine. */
3308 nds32_compute_stack_frame ();
3310 /* Prevent the instruction scheduler from
3311 moving instructions across the boundary. */
3312 emit_insn (gen_blockage ());
3314 /* If the function is 'naked', we do not have to generate
3315 epilogue code fragment BUT 'ret' instruction. */
3316 if (cfun
->machine
->naked_p
)
3318 /* Generate return instruction by using 'return_internal' pattern.
3319 Make sure this instruction is after gen_blockage(). */
3321 emit_jump_insn (gen_return_internal ());
3325 /* Get callee_first_regno and callee_last_regno. */
3326 Rb
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_first_gpr_regno
);
3327 Re
= gen_rtx_REG (SImode
, cfun
->machine
->callee_saved_last_gpr_regno
);
3329 /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
3330 where imm8u has to be 8-byte alignment. */
3331 sp_adjust
= cfun
->machine
->local_size
3332 + cfun
->machine
->out_args_size
3333 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3335 /* We have to consider alloca issue as well.
3336 If the function does call alloca(), the stack pointer is not fixed.
3337 In that case, we cannot use 'pop25 Re,imm8u' directly.
 3338 We have to calculate stack pointer from frame pointer
3339 and then use 'pop25 Re,0'.
3340 Of course, the frame_pointer_needed should be nonzero
3341 if the function calls alloca(). */
3342 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust
))
3343 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust
)
3344 && !cfun
->calls_alloca
)
3346 /* We can use 'pop25 Re,imm8u'. */
3348 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
 3349 the pattern 'stack_v3pop' is implemented in nds32.md.
3350 The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3351 nds32_emit_stack_v3pop (Rb
, Re
,
3352 GEN_INT (14), GEN_INT (sp_adjust
));
3356 /* We have to use 'pop25 Re,0', and prior to it,
3357 we must expand one more instruction to adjust $sp. */
3359 if (frame_pointer_needed
)
3361 /* adjust $sp = $fp - 4 ($fp size)
3364 - (4 * n) (callee-saved registers)
3365 Note: No need to adjust
3366 cfun->machine->callee_saved_area_padding_bytes,
3367 because we want to adjust stack pointer
3368 to the position for pop instruction. */
3369 sp_adjust
= cfun
->machine
->fp_size
3370 + cfun
->machine
->gp_size
3371 + cfun
->machine
->lp_size
3372 + cfun
->machine
->callee_saved_gpr_regs_size
;
3373 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3374 hard_frame_pointer_rtx
,
3375 GEN_INT (-1 * sp_adjust
));
3376 /* Emit rtx into instructions list and receive INSN rtx form. */
3377 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3381 /* If frame pointer is NOT needed,
3382 we cannot calculate the sp adjustment from frame pointer.
3383 Instead, we calculate the adjustment by local_size,
3384 out_args_size, and callee_saved_area_padding_bytes.
3385 Notice that such sp adjustment value may be out of range,
3386 so we have to deal with it as well. */
3388 /* Adjust $sp = $sp + local_size + out_args_size
3389 + callee_saved_area_padding_bytes. */
3390 sp_adjust
= cfun
->machine
->local_size
3391 + cfun
->machine
->out_args_size
3392 + cfun
->machine
->callee_saved_area_gpr_padding_bytes
;
3393 /* sp_adjust value may be out of range of the addi instruction,
3394 create alternative add behavior with TA_REGNUM if necessary,
3395 using POSITIVE value to tell that we are increasing address. */
3396 sp_adjust
= nds32_force_addi_stack_int (sp_adjust
);
3399 /* Generate sp adjustment instruction
3400 if and only if sp_adjust != 0. */
3401 sp_adjust_insn
= gen_addsi3 (stack_pointer_rtx
,
3403 GEN_INT (sp_adjust
));
3404 /* Emit rtx into instructions list and receive INSN rtx form. */
3405 sp_adjust_insn
= emit_insn (sp_adjust_insn
);
3409 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3410 the pattern 'stack_v3pop' is implementad in nds32.md. */
3411 /* The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3412 nds32_emit_stack_v3pop (Rb
, Re
,
3413 GEN_INT (14), GEN_INT (0));
3416 /* Generate return instruction. */
3417 emit_jump_insn (gen_pop25return ());
3420 /* Return nonzero if this function is known to have a null epilogue.
3421 This allows the optimizer to omit jumps to jumps if no stack
3424 nds32_can_use_return_insn (void)
3426 /* Prior to reloading, we can't tell how many registers must be saved.
3427 Thus we can not determine whether this function has null epilogue. */
3428 if (!reload_completed
)
3431 /* If no stack was created, two conditions must be satisfied:
3432 1. This is a naked function.
3433 So there is no callee-saved, local size, or outgoing size.
3434 2. This is NOT a variadic function.
3435 So there is no pushing arguement registers into the stack. */
3436 return (cfun
->machine
->naked_p
&& (cfun
->machine
->va_args_size
== 0));
/* ------------------------------------------------------------------------ */
3441 /* Function to test 333-form for load/store instructions.
3442 This is auxiliary extern function for auxiliary macro in nds32.h.
3443 Because it is a little complicated, we use function instead of macro. */
3445 nds32_ls_333_p (rtx rt
, rtx ra
, rtx imm
, machine_mode mode
)
3447 if (REGNO_REG_CLASS (REGNO (rt
)) == LOW_REGS
3448 && REGNO_REG_CLASS (REGNO (ra
)) == LOW_REGS
)
3450 if (GET_MODE_SIZE (mode
) == 4)
3451 return satisfies_constraint_Iu05 (imm
);
3453 if (GET_MODE_SIZE (mode
) == 2)
3454 return satisfies_constraint_Iu04 (imm
);
3456 if (GET_MODE_SIZE (mode
) == 1)
3457 return satisfies_constraint_Iu03 (imm
);
3464 /* Computing the Length of an Insn.
3465 Modifies the length assigned to instruction INSN.
3466 LEN is the initially computed length of the insn. */
3468 nds32_adjust_insn_length (rtx_insn
*insn
, int length
)
3472 switch (recog_memoized (insn
))
3474 case CODE_FOR_move_df
:
3475 case CODE_FOR_move_di
:
3476 /* Adjust length of movd44 to 2. */
3477 src
= XEXP (PATTERN (insn
), 1);
3478 dst
= XEXP (PATTERN (insn
), 0);
3482 && (REGNO (src
) % 2) == 0
3483 && (REGNO (dst
) % 2) == 0)
3495 /* Return align 2 (log base 2) if the next instruction of LABEL is 4 byte. */
3497 nds32_target_alignment (rtx_insn
*label
)
3504 insn
= next_active_insn (label
);
3508 else if ((get_attr_length (insn
) % 4) == 0)
3514 /* ------------------------------------------------------------------------ */
3516 /* PART 5: Initialize target hook structure and definitions. */
3518 /* Controlling the Compilation Driver. */
3521 /* Run-time Target Specification. */
3524 /* Defining Data Structures for Per-function Information. */
3527 /* Storage Layout. */
3529 #undef TARGET_PROMOTE_FUNCTION_MODE
3530 #define TARGET_PROMOTE_FUNCTION_MODE \
3531 default_promote_function_mode_always_promote
3534 /* Layout of Source Language Data Types. */
3537 /* Register Usage. */
3539 /* -- Basic Characteristics of Registers. */
3541 /* -- Order of Allocation of Registers. */
3543 /* -- How Values Fit in Registers. */
3545 /* -- Handling Leaf Functions. */
3547 /* -- Registers That Form a Stack. */
3550 /* Register Classes. */
3552 #undef TARGET_CLASS_MAX_NREGS
3553 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
3555 #undef TARGET_REGISTER_PRIORITY
3556 #define TARGET_REGISTER_PRIORITY nds32_register_priority
3559 /* Obsolete Macros for Defining Constraints. */
3562 /* Stack Layout and Calling Conventions. */
3564 /* -- Basic Stack Layout. */
3566 /* -- Exception Handling Support. */
3568 /* -- Specifying How Stack Checking is Done. */
3570 /* -- Registers That Address the Stack Frame. */
3572 /* -- Eliminating Frame Pointer and Arg Pointer. */
3574 #undef TARGET_CAN_ELIMINATE
3575 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
3577 /* -- Passing Function Arguments on the Stack. */
3579 /* -- Passing Arguments in Registers. */
3581 #undef TARGET_FUNCTION_ARG
3582 #define TARGET_FUNCTION_ARG nds32_function_arg
3584 #undef TARGET_MUST_PASS_IN_STACK
3585 #define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack
3587 #undef TARGET_ARG_PARTIAL_BYTES
3588 #define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes
3590 #undef TARGET_FUNCTION_ARG_ADVANCE
3591 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
3593 #undef TARGET_FUNCTION_ARG_BOUNDARY
3594 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
3596 /* -- How Scalar Function Values Are Returned. */
3598 #undef TARGET_FUNCTION_VALUE
3599 #define TARGET_FUNCTION_VALUE nds32_function_value
3601 #undef TARGET_LIBCALL_VALUE
3602 #define TARGET_LIBCALL_VALUE nds32_libcall_value
3604 #undef TARGET_FUNCTION_VALUE_REGNO_P
3605 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
3607 /* -- How Large Values Are Returned. */
3609 /* -- Caller-Saves Register Allocation. */
3611 /* -- Function Entry and Exit. */
3613 #undef TARGET_ASM_FUNCTION_PROLOGUE
3614 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
3616 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
3617 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
3619 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
3620 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
3622 #undef TARGET_ASM_FUNCTION_EPILOGUE
3623 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
3625 #undef TARGET_ASM_OUTPUT_MI_THUNK
3626 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
3628 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3629 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
3631 /* -- Generating Code for Profiling. */
3633 /* -- Permitting tail calls. */
3635 #undef TARGET_WARN_FUNC_RETURN
3636 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
3638 /* Stack smashing protection. */
3641 /* Implementing the Varargs Macros. */
3643 #undef TARGET_SETUP_INCOMING_VARARGS
3644 #define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs
3646 #undef TARGET_STRICT_ARGUMENT_NAMING
3647 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
3650 /* Trampolines for Nested Functions. */
3652 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
3653 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
3655 #undef TARGET_TRAMPOLINE_INIT
3656 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
3659 /* Implicit Calls to Library Routines. */
3662 /* Addressing Modes. */
3664 #undef TARGET_LEGITIMATE_ADDRESS_P
3665 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
3668 /* Anchored Addresses. */
3671 /* Condition Code Status. */
3673 /* -- Representation of condition codes using (cc0). */
3675 /* -- Representation of condition codes using registers. */
3677 /* -- Macros to control conditional execution. */
3680 /* Describing Relative Costs of Operations. */
3682 #undef TARGET_REGISTER_MOVE_COST
3683 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
3685 #undef TARGET_MEMORY_MOVE_COST
3686 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
3688 #undef TARGET_RTX_COSTS
3689 #define TARGET_RTX_COSTS nds32_rtx_costs
3691 #undef TARGET_ADDRESS_COST
3692 #define TARGET_ADDRESS_COST nds32_address_cost
3695 /* Adjusting the Instruction Scheduler. */
3698 /* Dividing the Output into Sections (Texts, Data, . . . ). */
3700 #undef TARGET_ENCODE_SECTION_INFO
3701 #define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info
3704 /* Position Independent Code. */
3707 /* Defining the Output Assembler Language. */
3709 /* -- The Overall Framework of an Assembler File. */
3711 #undef TARGET_ASM_FILE_START
3712 #define TARGET_ASM_FILE_START nds32_asm_file_start
3713 #undef TARGET_ASM_FILE_END
3714 #define TARGET_ASM_FILE_END nds32_asm_file_end
3716 /* -- Output of Data. */
3718 #undef TARGET_ASM_ALIGNED_HI_OP
3719 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3721 #undef TARGET_ASM_ALIGNED_SI_OP
3722 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
3724 /* -- Output of Uninitialized Variables. */
3726 /* -- Output and Generation of Labels. */
3728 #undef TARGET_ASM_GLOBALIZE_LABEL
3729 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
3731 /* -- How Initialization Functions Are Handled. */
3733 /* -- Macros Controlling Initialization Routines. */
3735 /* -- Output of Assembler Instructions. */
3737 #undef TARGET_PRINT_OPERAND
3738 #define TARGET_PRINT_OPERAND nds32_print_operand
3739 #undef TARGET_PRINT_OPERAND_ADDRESS
3740 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
3742 /* -- Output of Dispatch Tables. */
3744 /* -- Assembler Commands for Exception Regions. */
3746 /* -- Assembler Commands for Alignment. */
3749 /* Controlling Debugging Information Format. */
3751 /* -- Macros Affecting All Debugging Formats. */
3753 /* -- Specific Options for DBX Output. */
3755 /* -- Open-Ended Hooks for DBX Format. */
3757 /* -- File Names in DBX Format. */
3759 /* -- Macros for SDB and DWARF Output. */
3761 /* -- Macros for VMS Debug Format. */
3764 /* Cross Compilation and Floating Point. */
3767 /* Mode Switching Instructions. */
3770 /* Defining target-specific uses of __attribute__. */
3772 #undef TARGET_ATTRIBUTE_TABLE
3773 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
3775 #undef TARGET_MERGE_DECL_ATTRIBUTES
3776 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
3778 #undef TARGET_INSERT_ATTRIBUTES
3779 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
3781 #undef TARGET_OPTION_PRAGMA_PARSE
3782 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
3784 #undef TARGET_OPTION_OVERRIDE
3785 #define TARGET_OPTION_OVERRIDE nds32_option_override
3788 /* Emulating TLS. */
3791 /* Defining coprocessor specifics for MIPS targets. */
3794 /* Parameters for Precompiled Header Validity Checking. */
3797 /* C++ ABI parameters. */
3800 /* Adding support for named address spaces. */
3803 /* Miscellaneous Parameters. */
3805 #undef TARGET_INIT_BUILTINS
3806 #define TARGET_INIT_BUILTINS nds32_init_builtins
3808 #undef TARGET_EXPAND_BUILTIN
3809 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
3812 /* ------------------------------------------------------------------------ */
3814 /* Initialize the GCC target structure. */
3816 struct gcc_target targetm
= TARGET_INITIALIZER
;
/* ------------------------------------------------------------------------ */