1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* ------------------------------------------------------------------------ */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "stringpool.h"
31 #include "attribs.h"
32 #include "df.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "optabs.h" /* For GEN_FCN. */
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "stor-layout.h"
41 #include "varasm.h"
42 #include "calls.h"
43 #include "output.h"
44 #include "explow.h"
45 #include "expr.h"
46 #include "tm-constrs.h"
47 #include "builtins.h"
48
49 /* This file should be included last. */
50 #include "target-def.h"
51
52 /* ------------------------------------------------------------------------ */
53
54 /* This file is divided into five parts:
55
56 PART 1: Auxiliary static variable definitions and
57 target hook static variable definitions.
58
59 PART 2: Auxiliary static function definitions.
60
61 PART 3: Implement target hook stuff definitions.
62
63 PART 4: Implement extern function definitions,
64 the prototypes are in nds32-protos.h.
65
66 PART 5: Initialize target hook structure and definitions. */
67
68 /* ------------------------------------------------------------------------ */
69
70 /* PART 1: Auxiliary static variable definitions and
71 target hook static variable definitions. */
72
73 /* Define intrinsic register names.
74 Please refer to the nds32_intrinsic.h file; the index corresponds to
75 'enum nds32_intrinsic_registers' data type values.
76 NOTE that the base value starts from 1024. */
77 static const char * const nds32_intrinsic_register_names[] =
78 {
79 "$PSW", "$IPSW", "$ITYPE", "$IPC"
80 };
81
82 /* Defining target-specific uses of __attribute__. */
83 static const struct attribute_spec nds32_attribute_table[] =
84 {
85 /* Syntax: { name, min_len, max_len, decl_required, type_required,
86 function_type_required, affects_type_identity, handler,
87 exclude } */
88
89 /* The interrupt vid: [0-63]+ (actual vector number ranges from 9 to 72). */
90 { "interrupt", 1, 64, false, false, false, false, NULL, NULL },
91 /* The exception vid: [1-8]+ (actual vector number ranges from 1 to 8). */
92 { "exception", 1, 8, false, false, false, false, NULL, NULL },
93 /* Argument is user's interrupt numbers. The vector number is always 0. */
94 { "reset", 1, 1, false, false, false, false, NULL, NULL },
95
96 /* The attributes describing isr nested type. */
97 { "nested", 0, 0, false, false, false, false, NULL, NULL },
98 { "not_nested", 0, 0, false, false, false, false, NULL, NULL },
99 { "nested_ready", 0, 0, false, false, false, false, NULL, NULL },
100
101 /* The attributes describing isr register save scheme. */
102 { "save_all", 0, 0, false, false, false, false, NULL, NULL },
103 { "partial_save", 0, 0, false, false, false, false, NULL, NULL },
104
105 /* The attributes used by reset attribute. */
106 { "nmi", 1, 1, false, false, false, false, NULL, NULL },
107 { "warm", 1, 1, false, false, false, false, NULL, NULL },
108
109 /* The attribute telling no prologue/epilogue. */
110 { "naked", 0, 0, false, false, false, false, NULL, NULL },
111
112 /* The last attribute spec is set to be NULL. */
113 { NULL, 0, 0, false, false, false, false, NULL, NULL }
114 };
115
116
117 /* ------------------------------------------------------------------------ */
118
119 /* PART 2: Auxiliary static function definitions. */
120
121 /* Function to save and restore machine-specific function data. */
122 static struct machine_function *
123 nds32_init_machine_status (void)
124 {
125 struct machine_function *machine;
126 machine = ggc_cleared_alloc<machine_function> ();
127
128 /* Initially assume this function needs prologue/epilogue. */
129 machine->naked_p = 0;
130
131 /* Initially assume this function does NOT use fp_as_gp optimization. */
132 machine->fp_as_gp_p = 0;
133
134 return machine;
135 }
136
137 /* Function to compute stack frame size and
138 store it into the cfun->machine structure. */
139 static void
140 nds32_compute_stack_frame (void)
141 {
142 int r;
143 int block_size;
144
145 /* Because nds32_compute_stack_frame() will be called from different places,
146 every time we enter this function we have to assume that it
147 needs prologue/epilogue. */
148 cfun->machine->naked_p = 0;
149
150 /* Get the variadic arguments size to prepare pretend arguments;
151 we will push them onto the stack in the prologue ourselves. */
152 cfun->machine->va_args_size = crtl->args.pretend_args_size;
153 if (cfun->machine->va_args_size != 0)
154 {
155 cfun->machine->va_args_first_regno
156 = NDS32_GPR_ARG_FIRST_REGNUM
157 + NDS32_MAX_GPR_REGS_FOR_ARGS
158 - (crtl->args.pretend_args_size / UNITS_PER_WORD);
159 cfun->machine->va_args_last_regno
160 = NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS - 1;
161 }
162 else
163 {
164 cfun->machine->va_args_first_regno = SP_REGNUM;
165 cfun->machine->va_args_last_regno = SP_REGNUM;
166 }
167
168 /* Important: We need to make sure that the varargs area is 8-byte aligned. */
169 block_size = cfun->machine->va_args_size;
170 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
171 {
172 cfun->machine->va_args_area_padding_bytes
173 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
174 }
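/* For example, with a single pretend word (block_size == 4),
   NDS32_ROUND_UP_DOUBLE_WORD (4) yields 8, so 4 padding bytes are
   recorded to keep the varargs area 8-byte aligned.  */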
175
176 /* Get local variables, incoming variables, and temporary variables size.
177 Note that we need to make sure it is 8-byte aligned because
178 there may be no padding bytes if we are using LRA. */
179 cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
180
181 /* Get outgoing arguments size. */
182 cfun->machine->out_args_size = crtl->outgoing_args_size;
183
184 /* If the $fp value is required to be saved on stack, it needs 4 bytes of space.
185 Check whether $fp is ever live. */
186 cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;
187
188 /* If the $gp value is required to be saved on stack, it needs 4 bytes of space.
189 Check whether we are using PIC code generation. */
190 cfun->machine->gp_size = (flag_pic) ? 4 : 0;
191
192 /* If the $lp value is required to be saved on stack, it needs 4 bytes of space.
193 Check whether $lp is ever live. */
194 cfun->machine->lp_size = (df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;
195
196 /* Initially there is no padding bytes. */
197 cfun->machine->callee_saved_area_gpr_padding_bytes = 0;
198
199 /* Calculate the bytes of saving callee-saved registers on stack. */
200 cfun->machine->callee_saved_gpr_regs_size = 0;
201 cfun->machine->callee_saved_first_gpr_regno = SP_REGNUM;
202 cfun->machine->callee_saved_last_gpr_regno = SP_REGNUM;
203 /* Currently, there is no need to check $r28~$r31
204 because we will save them in another way. */
205 for (r = 0; r < 28; r++)
206 {
207 if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
208 {
209 /* Mark the first required callee-saved register
210 (only need to set it once).
211 If first regno == SP_REGNUM, we can tell that
212 this is the first time we get here. */
213 if (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM)
214 cfun->machine->callee_saved_first_gpr_regno = r;
215 /* Mark the last required callee-saved register. */
216 cfun->machine->callee_saved_last_gpr_regno = r;
217 }
218 }
219
220 /* Check if this function can omit prologue/epilogue code fragment.
221 If there is 'naked' attribute in this function,
222 we can set 'naked_p' flag to indicate that
223 we do not have to generate prologue/epilogue.
224 Or, if all the following conditions succeed,
225 we can set this function 'naked_p' as well:
226 condition 1: first_regno == last_regno == SP_REGNUM,
227 which means we do not have to save
228 any callee-saved registers.
229 condition 2: Both $lp and $fp are NOT live in this function,
230 which means we do not need to save them and there
231 is no outgoing size.
232 condition 3: There is no local_size, which means
233 we do not need to adjust $sp. */
234 if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
235 || (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM
236 && cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM
237 && !df_regs_ever_live_p (FP_REGNUM)
238 && !df_regs_ever_live_p (LP_REGNUM)
239 && cfun->machine->local_size == 0))
240 {
241 /* Set this function's 'naked_p' so that other functions can check this flag.
242 Note that in the nds32 port, 'naked_p = 1' JUST means there is no
243 callee-saved area, local size, or outgoing size.
244 The varargs space and ret instruction may still be present in
245 the prologue/epilogue expansion. */
246 cfun->machine->naked_p = 1;
247
248 /* No need to save $fp, $gp, and $lp.
249 We should set these values to zero
250 so that nds32_initial_elimination_offset() can work properly. */
251 cfun->machine->fp_size = 0;
252 cfun->machine->gp_size = 0;
253 cfun->machine->lp_size = 0;
254
255 /* If stack usage computation is required,
256 we need to provide the static stack size. */
257 if (flag_stack_usage_info)
258 current_function_static_stack_size = 0;
259
260 /* No need to do following adjustment, return immediately. */
261 return;
262 }
263
264 /* Adjustment for v3push instructions:
265 If we are using v3push (push25/pop25) instructions,
266 we need to make sure Rb is $r6 and Re is
267 one of $r6, $r8, $r10, or $r14.
268 Some results above will be discarded and recomputed.
269 Note that it is only available under the V3/V3M ISA and we
270 DO NOT set up the following stuff for isr or variadic functions. */
271 if (TARGET_V3PUSH
272 && !nds32_isr_function_p (current_function_decl)
273 && (cfun->machine->va_args_size == 0))
274 {
275 /* Recompute:
276 cfun->machine->fp_size
277 cfun->machine->gp_size
278 cfun->machine->lp_size
279 cfun->machine->callee_saved_first_gpr_regno
280 cfun->machine->callee_saved_last_gpr_regno */
281
282 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
283 cfun->machine->fp_size = 4;
284 cfun->machine->gp_size = 4;
285 cfun->machine->lp_size = 4;
286
287 /* Remember to set Rb = $r6. */
288 cfun->machine->callee_saved_first_gpr_regno = 6;
289
290 if (cfun->machine->callee_saved_last_gpr_regno <= 6)
291 {
292 /* Re = $r6 */
293 cfun->machine->callee_saved_last_gpr_regno = 6;
294 }
295 else if (cfun->machine->callee_saved_last_gpr_regno <= 8)
296 {
297 /* Re = $r8 */
298 cfun->machine->callee_saved_last_gpr_regno = 8;
299 }
300 else if (cfun->machine->callee_saved_last_gpr_regno <= 10)
301 {
302 /* Re = $r10 */
303 cfun->machine->callee_saved_last_gpr_regno = 10;
304 }
305 else if (cfun->machine->callee_saved_last_gpr_regno <= 14)
306 {
307 /* Re = $r14 */
308 cfun->machine->callee_saved_last_gpr_regno = 14;
309 }
310 else if (cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM)
311 {
312 /* If last_regno is SP_REGNUM, it means it was never
313 changed, so set Re = $r6. */
314 cfun->machine->callee_saved_last_gpr_regno = 6;
315 }
316 else
317 {
318 /* The program flow should not go here. */
319 gcc_unreachable ();
320 }
321 }
322
323 /* We have correctly set callee_saved_first_gpr_regno
324 and callee_saved_last_gpr_regno.
325 Initially, the callee_saved_gpr_regs_size is supposed to be 0.
326 As long as callee_saved_last_gpr_regno is not SP_REGNUM,
327 we can update callee_saved_gpr_regs_size with new size. */
328 if (cfun->machine->callee_saved_last_gpr_regno != SP_REGNUM)
329 {
330 /* Compute pushed size of callee-saved registers. */
331 cfun->machine->callee_saved_gpr_regs_size
332 = 4 * (cfun->machine->callee_saved_last_gpr_regno
333 - cfun->machine->callee_saved_first_gpr_regno
334 + 1);
335 }
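/* For example, if the required callee-saved registers are $r6..$r9,
   the pushed size is 4 * (9 - 6 + 1) = 16 bytes.  */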
336
337 /* Important: We need to make sure that
338 (fp_size + gp_size + lp_size + callee_saved_gpr_regs_size)
339 is 8-byte aligned.
340 If it is not, calculate the padding bytes. */
341 block_size = cfun->machine->fp_size
342 + cfun->machine->gp_size
343 + cfun->machine->lp_size
344 + cfun->machine->callee_saved_gpr_regs_size;
345 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
346 {
347 cfun->machine->callee_saved_area_gpr_padding_bytes
348 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
349 }
350
351 /* If stack usage computation is required,
352 we need to provide the static stack size. */
353 if (flag_stack_usage_info)
354 {
355 current_function_static_stack_size
356 = NDS32_ROUND_UP_DOUBLE_WORD (block_size)
357 + cfun->machine->local_size
358 + cfun->machine->out_args_size;
359 }
360 }
361
362 /* Function to create a parallel rtx pattern
363 which represents stack push-multiple behavior.
364 The overall concept is:
365 "push registers to memory",
366 "adjust stack pointer". */
367 static void
368 nds32_emit_stack_push_multiple (rtx Rb, rtx Re, rtx En4, bool vaarg_p)
369 {
370 int regno;
371 int extra_count;
372 int num_use_regs;
373 int par_index;
374 int offset;
375 int save_fp, save_gp, save_lp;
376
377 rtx reg;
378 rtx mem;
379 rtx push_rtx;
380 rtx adjust_sp_rtx;
381 rtx parallel_insn;
382 rtx dwarf;
383
384 /* We need to provide a customized rtx which contains
385 necessary information for data analysis,
386 so we create a parallel rtx like this:
387 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
388 (reg:SI Rb))
389 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
390 (reg:SI Rb+1))
391 ...
392 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
393 (reg:SI Re))
394 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
395 (reg:SI FP_REGNUM))
396 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
397 (reg:SI GP_REGNUM))
398 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
399 (reg:SI LP_REGNUM))
400 (set (reg:SI SP_REGNUM)
401 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
402
403 /* Determine whether we need to save $fp, $gp, or $lp. */
404 save_fp = INTVAL (En4) & 0x8;
405 save_gp = INTVAL (En4) & 0x4;
406 save_lp = INTVAL (En4) & 0x2;
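/* In other words, En4 acts as an enable mask here: 0x8 selects $fp,
   0x4 selects $gp, and 0x2 selects $lp; e.g. an En4 value of 0xe
   requests saving all three.  */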
407
408 /* Calculate the number of registers that will be pushed. */
409 extra_count = 0;
410 if (save_fp)
411 extra_count++;
412 if (save_gp)
413 extra_count++;
414 if (save_lp)
415 extra_count++;
416 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
417 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
418 num_use_regs = extra_count;
419 else
420 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
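/* For example, pushing $r6..$r8 (Rb = $r6, Re = $r8) together with
   $fp, $gp, and $lp gives num_use_regs = (8 - 6 + 1) + 3 = 6.  */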
421
422 /* In addition to used registers,
423 we need one more space for (set sp sp-x) rtx. */
424 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
425 rtvec_alloc (num_use_regs + 1));
426 par_index = 0;
427
428 /* Initialize offset and start to create push behavior. */
429 offset = -(num_use_regs * 4);
430
431 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
432 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
433 {
434 /* Rb and Re may be SP_REGNUM.
435 We need to break this loop immediately. */
436 if (regno == SP_REGNUM)
437 break;
438
439 reg = gen_rtx_REG (SImode, regno);
440 mem = gen_frame_mem (SImode, plus_constant (Pmode,
441 stack_pointer_rtx,
442 offset));
443 push_rtx = gen_rtx_SET (mem, reg);
444 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
445 RTX_FRAME_RELATED_P (push_rtx) = 1;
446 offset = offset + 4;
447 par_index++;
448 }
449
450 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
451 if (save_fp)
452 {
453 reg = gen_rtx_REG (SImode, FP_REGNUM);
454 mem = gen_frame_mem (SImode, plus_constant (Pmode,
455 stack_pointer_rtx,
456 offset));
457 push_rtx = gen_rtx_SET (mem, reg);
458 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
459 RTX_FRAME_RELATED_P (push_rtx) = 1;
460 offset = offset + 4;
461 par_index++;
462 }
463 if (save_gp)
464 {
465 reg = gen_rtx_REG (SImode, GP_REGNUM);
466 mem = gen_frame_mem (SImode, plus_constant (Pmode,
467 stack_pointer_rtx,
468 offset));
469 push_rtx = gen_rtx_SET (mem, reg);
470 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
471 RTX_FRAME_RELATED_P (push_rtx) = 1;
472 offset = offset + 4;
473 par_index++;
474 }
475 if (save_lp)
476 {
477 reg = gen_rtx_REG (SImode, LP_REGNUM);
478 mem = gen_frame_mem (SImode, plus_constant (Pmode,
479 stack_pointer_rtx,
480 offset));
481 push_rtx = gen_rtx_SET (mem, reg);
482 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
483 RTX_FRAME_RELATED_P (push_rtx) = 1;
484 offset = offset + 4;
485 par_index++;
486 }
487
488 /* Create (set sp sp-x). */
489
490 /* We need to re-calculate the offset value again for adjustment. */
491 offset = -(num_use_regs * 4);
492 adjust_sp_rtx
493 = gen_rtx_SET (stack_pointer_rtx,
494 plus_constant (Pmode, stack_pointer_rtx, offset));
495 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
496 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
497
498 parallel_insn = emit_insn (parallel_insn);
499
500 /* The insn rtx 'parallel_insn' will change frame layout.
501 We need to use RTX_FRAME_RELATED_P so that GCC is able to
502 generate CFI (Call Frame Information) stuff. */
503 RTX_FRAME_RELATED_P (parallel_insn) = 1;
504
505 /* Don't use GCC's logic for CFI info if we are generating a push for VAARG,
506 since we will not restore those registers at epilogue. */
507 if (vaarg_p)
508 {
509 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA,
510 copy_rtx (adjust_sp_rtx), NULL_RTX);
511 REG_NOTES (parallel_insn) = dwarf;
512 }
513 }
514
515 /* Function to create a parallel rtx pattern
516 which represents stack pop-multiple behavior.
517 The overall concept is:
518 "pop registers from memory",
519 "adjust stack pointer". */
520 static void
521 nds32_emit_stack_pop_multiple (rtx Rb, rtx Re, rtx En4)
522 {
523 int regno;
524 int extra_count;
525 int num_use_regs;
526 int par_index;
527 int offset;
528 int save_fp, save_gp, save_lp;
529
530 rtx reg;
531 rtx mem;
532 rtx pop_rtx;
533 rtx adjust_sp_rtx;
534 rtx parallel_insn;
535 rtx dwarf = NULL_RTX;
536
537 /* We need to provide a customized rtx which contains
538 necessary information for data analysis,
539 so we create a parallel rtx like this:
540 (parallel [(set (reg:SI Rb)
541 (mem (reg:SI SP_REGNUM)))
542 (set (reg:SI Rb+1)
543 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
544 ...
545 (set (reg:SI Re)
546 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
547 (set (reg:SI FP_REGNUM)
548 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
549 (set (reg:SI GP_REGNUM)
550 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
551 (set (reg:SI LP_REGNUM)
552 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
553 (set (reg:SI SP_REGNUM)
554 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
555
556 /* Determine whether we need to restore $fp, $gp, or $lp. */
557 save_fp = INTVAL (En4) & 0x8;
558 save_gp = INTVAL (En4) & 0x4;
559 save_lp = INTVAL (En4) & 0x2;
560
561 /* Calculate the number of registers that will be popped. */
562 extra_count = 0;
563 if (save_fp)
564 extra_count++;
565 if (save_gp)
566 extra_count++;
567 if (save_lp)
568 extra_count++;
569 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
570 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
571 num_use_regs = extra_count;
572 else
573 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
574
575 /* In addition to used registers,
576 we need one more space for (set sp sp+x) rtx. */
577 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
578 rtvec_alloc (num_use_regs + 1));
579 par_index = 0;
580
581 /* Initialize offset and start to create pop behavior. */
582 offset = 0;
583
584 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
585 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
586 {
587 /* Rb and Re may be SP_REGNUM.
588 We need to break this loop immediately. */
589 if (regno == SP_REGNUM)
590 break;
591
592 reg = gen_rtx_REG (SImode, regno);
593 mem = gen_frame_mem (SImode, plus_constant (Pmode,
594 stack_pointer_rtx,
595 offset));
596 pop_rtx = gen_rtx_SET (reg, mem);
597 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
598 RTX_FRAME_RELATED_P (pop_rtx) = 1;
599 offset = offset + 4;
600 par_index++;
601
602 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
603 }
604
605 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
606 if (save_fp)
607 {
608 reg = gen_rtx_REG (SImode, FP_REGNUM);
609 mem = gen_frame_mem (SImode, plus_constant (Pmode,
610 stack_pointer_rtx,
611 offset));
612 pop_rtx = gen_rtx_SET (reg, mem);
613 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
614 RTX_FRAME_RELATED_P (pop_rtx) = 1;
615 offset = offset + 4;
616 par_index++;
617
618 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
619 }
620 if (save_gp)
621 {
622 reg = gen_rtx_REG (SImode, GP_REGNUM);
623 mem = gen_frame_mem (SImode, plus_constant (Pmode,
624 stack_pointer_rtx,
625 offset));
626 pop_rtx = gen_rtx_SET (reg, mem);
627 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
628 RTX_FRAME_RELATED_P (pop_rtx) = 1;
629 offset = offset + 4;
630 par_index++;
631
632 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
633 }
634 if (save_lp)
635 {
636 reg = gen_rtx_REG (SImode, LP_REGNUM);
637 mem = gen_frame_mem (SImode, plus_constant (Pmode,
638 stack_pointer_rtx,
639 offset));
640 pop_rtx = gen_rtx_SET (reg, mem);
641 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
642 RTX_FRAME_RELATED_P (pop_rtx) = 1;
643 offset = offset + 4;
644 par_index++;
645
646 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
647 }
648
649 /* Create (set sp sp+x). */
650
651 /* The offset value is already in place. No need to re-calculate it. */
652 adjust_sp_rtx
653 = gen_rtx_SET (stack_pointer_rtx,
654 plus_constant (Pmode, stack_pointer_rtx, offset));
655 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
656
657 /* Tell gcc we adjust SP in this insn. */
658 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
659
660 parallel_insn = emit_insn (parallel_insn);
661
662 /* The insn rtx 'parallel_insn' will change frame layout.
663 We need to use RTX_FRAME_RELATED_P so that GCC is able to
664 generate CFI (Call Frame Information) stuff. */
665 RTX_FRAME_RELATED_P (parallel_insn) = 1;
666
667 /* Add CFI info manually. */
668 REG_NOTES (parallel_insn) = dwarf;
669 }
670
671 /* Function to create a parallel rtx pattern
672 which represents stack v3push behavior.
673 The overall concept is:
674 "push registers to memory",
675 "adjust stack pointer". */
676 static void
677 nds32_emit_stack_v3push (rtx Rb,
678 rtx Re,
679 rtx En4 ATTRIBUTE_UNUSED,
680 rtx imm8u)
681 {
682 int regno;
683 int num_use_regs;
684 int par_index;
685 int offset;
686
687 rtx reg;
688 rtx mem;
689 rtx push_rtx;
690 rtx adjust_sp_rtx;
691 rtx parallel_insn;
692
693 /* We need to provide a customized rtx which contains
694 necessary information for data analysis,
695 so we create a parallel rtx like this:
696 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
697 (reg:SI Rb))
698 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
699 (reg:SI Rb+1))
700 ...
701 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
702 (reg:SI Re))
703 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
704 (reg:SI FP_REGNUM))
705 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
706 (reg:SI GP_REGNUM))
707 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
708 (reg:SI LP_REGNUM))
709 (set (reg:SI SP_REGNUM)
710 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
711
712 /* Calculate the number of registers that will be pushed.
713 Since $fp, $gp, and $lp are always pushed with the v3push instruction,
714 we need to count these three registers.
715 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
716 So there is no need to worry about the Rb=Re=SP_REGNUM case. */
717 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
718
719 /* In addition to used registers,
720 we need one more space for (set sp sp-x-imm8u) rtx. */
721 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
722 rtvec_alloc (num_use_regs + 1));
723 par_index = 0;
724
725 /* Initialize offset and start to create push behavior. */
726 offset = -(num_use_regs * 4);
727
728 /* Create (set mem regX) from Rb, Rb+1 up to Re.
729 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
730 So there is no need to worry about Rb=Re=SP_REGNUM case. */
731 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
732 {
733 reg = gen_rtx_REG (SImode, regno);
734 mem = gen_frame_mem (SImode, plus_constant (Pmode,
735 stack_pointer_rtx,
736 offset));
737 push_rtx = gen_rtx_SET (mem, reg);
738 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
739 RTX_FRAME_RELATED_P (push_rtx) = 1;
740 offset = offset + 4;
741 par_index++;
742 }
743
744 /* Create (set mem fp). */
745 reg = gen_rtx_REG (SImode, FP_REGNUM);
746 mem = gen_frame_mem (SImode, plus_constant (Pmode,
747 stack_pointer_rtx,
748 offset));
749 push_rtx = gen_rtx_SET (mem, reg);
750 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
751 RTX_FRAME_RELATED_P (push_rtx) = 1;
752 offset = offset + 4;
753 par_index++;
754 /* Create (set mem gp). */
755 reg = gen_rtx_REG (SImode, GP_REGNUM);
756 mem = gen_frame_mem (SImode, plus_constant (Pmode,
757 stack_pointer_rtx,
758 offset));
759 push_rtx = gen_rtx_SET (mem, reg);
760 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
761 RTX_FRAME_RELATED_P (push_rtx) = 1;
762 offset = offset + 4;
763 par_index++;
764 /* Create (set mem lp). */
765 reg = gen_rtx_REG (SImode, LP_REGNUM);
766 mem = gen_frame_mem (SImode, plus_constant (Pmode,
767 stack_pointer_rtx,
768 offset));
769 push_rtx = gen_rtx_SET (mem, reg);
770 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
771 RTX_FRAME_RELATED_P (push_rtx) = 1;
772 offset = offset + 4;
773 par_index++;
774
775 /* Create (set sp sp-x-imm8u). */
776
777 /* We need to re-calculate the offset value again for adjustment. */
778 offset = -(num_use_regs * 4);
779 adjust_sp_rtx
780 = gen_rtx_SET (stack_pointer_rtx,
781 plus_constant (Pmode,
782 stack_pointer_rtx,
783 offset - INTVAL (imm8u)));
784 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
785 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
786
787 parallel_insn = emit_insn (parallel_insn);
788
789 /* The insn rtx 'parallel_insn' will change frame layout.
790 We need to use RTX_FRAME_RELATED_P so that GCC is able to
791 generate CFI (Call Frame Information) stuff. */
792 RTX_FRAME_RELATED_P (parallel_insn) = 1;
793 }
794
795 /* Function to create a parallel rtx pattern
796 which represents stack v3pop behavior.
797 The overall concept is:
798 "pop registers from memory",
799 "adjust stack pointer". */
800 static void
801 nds32_emit_stack_v3pop (rtx Rb,
802 rtx Re,
803 rtx En4 ATTRIBUTE_UNUSED,
804 rtx imm8u)
805 {
806 int regno;
807 int num_use_regs;
808 int par_index;
809 int offset;
810
811 rtx reg;
812 rtx mem;
813 rtx pop_rtx;
814 rtx adjust_sp_rtx;
815 rtx parallel_insn;
816 rtx dwarf = NULL_RTX;
817
818 /* We need to provide a customized rtx which contains
819 necessary information for data analysis,
820 so we create a parallel rtx like this:
821 (parallel [(set (reg:SI Rb)
822 (mem (reg:SI SP_REGNUM)))
823 (set (reg:SI Rb+1)
824 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
825 ...
826 (set (reg:SI Re)
827 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
828 (set (reg:SI FP_REGNUM)
829 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
830 (set (reg:SI GP_REGNUM)
831 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
832 (set (reg:SI LP_REGNUM)
833 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
834 (set (reg:SI SP_REGNUM)
835 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
836
837 /* Calculate the number of registers that will be popped.
838 Since $fp, $gp, and $lp are always popped with the v3pop instruction,
839 we need to count these three registers.
840 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
841 So there is no need to worry about the Rb=Re=SP_REGNUM case. */
842 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
843
844 /* In addition to used registers,
845 we need one more space for (set sp sp+x+imm8u) rtx. */
846 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
847 rtvec_alloc (num_use_regs + 1));
848 par_index = 0;
849
850 /* Initialize offset and start to create pop behavior. */
851 offset = 0;
852
853 /* Create (set regX mem) from Rb, Rb+1 up to Re.
854 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
855 So there is no need to worry about Rb=Re=SP_REGNUM case. */
856 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
857 {
858 reg = gen_rtx_REG (SImode, regno);
859 mem = gen_frame_mem (SImode, plus_constant (Pmode,
860 stack_pointer_rtx,
861 offset));
862 pop_rtx = gen_rtx_SET (reg, mem);
863 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
864 RTX_FRAME_RELATED_P (pop_rtx) = 1;
865 offset = offset + 4;
866 par_index++;
867
868 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
869 }
870
871 /* Create (set fp mem). */
872 reg = gen_rtx_REG (SImode, FP_REGNUM);
873 mem = gen_frame_mem (SImode, plus_constant (Pmode,
874 stack_pointer_rtx,
875 offset));
876 pop_rtx = gen_rtx_SET (reg, mem);
877 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
878 RTX_FRAME_RELATED_P (pop_rtx) = 1;
879 offset = offset + 4;
880 par_index++;
881 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
882
883 /* Create (set gp mem). */
884 reg = gen_rtx_REG (SImode, GP_REGNUM);
885 mem = gen_frame_mem (SImode, plus_constant (Pmode,
886 stack_pointer_rtx,
887 offset));
888 pop_rtx = gen_rtx_SET (reg, mem);
889 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
890 RTX_FRAME_RELATED_P (pop_rtx) = 1;
891 offset = offset + 4;
892 par_index++;
893 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
894
895 /* Create (set lp mem). */
896 reg = gen_rtx_REG (SImode, LP_REGNUM);
897 mem = gen_frame_mem (SImode, plus_constant (Pmode,
898 stack_pointer_rtx,
899 offset));
900 pop_rtx = gen_rtx_SET (reg, mem);
901 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
902 RTX_FRAME_RELATED_P (pop_rtx) = 1;
903 offset = offset + 4;
904 par_index++;
905 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
906
907 /* Create (set sp sp+x+imm8u). */
908
909 /* The offset value is already in place. No need to re-calculate it. */
910 adjust_sp_rtx
911 = gen_rtx_SET (stack_pointer_rtx,
912 plus_constant (Pmode,
913 stack_pointer_rtx,
914 offset + INTVAL (imm8u)));
915 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
916
917 /* Tell gcc we adjust SP in this insn. */
918 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
919
920 parallel_insn = emit_insn (parallel_insn);
921
922 /* The insn rtx 'parallel_insn' will change frame layout.
923 We need to use RTX_FRAME_RELATED_P so that GCC is able to
924 generate CFI (Call Frame Information) stuff. */
925 RTX_FRAME_RELATED_P (parallel_insn) = 1;
926
927 /* Add CFI info manually. */
928 REG_NOTES (parallel_insn) = dwarf;
929 }
930
931 /* Function that may create more instructions
932 for adjusting the stack pointer by a large value.
933 
934 In the nds32 target, 'addi' can be used for stack pointer
935 adjustment in the prologue/epilogue stage.
936 However, sometimes there are too many local variables so that
937 the adjustment value cannot fit in the 'addi' instruction.
938 One solution is to move the value into a register
939 and then use the 'add' instruction.
940 In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
941 Also, we need to return zero for the sp adjustment so that
942 prologue/epilogue knows there is no need to create an 'addi' instruction. */
943 static int
944 nds32_force_addi_stack_int (int full_value)
945 {
946 int adjust_value;
947
948 rtx tmp_reg;
949 rtx sp_adjust_insn;
950
951 if (!satisfies_constraint_Is15 (GEN_INT (full_value)))
952 {
953 /* The value cannot fit in a single addi instruction.
954 Create more instructions to move the value into a register
955 and then add it to the stack pointer. */
956
957 /* $r15 is going to be temporary register to hold the value. */
958 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
959
960 /* Create one more instruction to move value
961 into the temporary register. */
962 emit_move_insn (tmp_reg, GEN_INT (full_value));
963
964 /* Create new 'add' rtx. */
965 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
966 stack_pointer_rtx,
967 tmp_reg);
968 /* Emit rtx into insn list and receive its transformed insn rtx. */
969 sp_adjust_insn = emit_insn (sp_adjust_insn);
970
971 /* At prologue, we need to tell GCC that this is a frame related insn,
972 so that this instruction is considered when outputting debug information.
973 If full_value is NEGATIVE, it means this function
974 is invoked by expand_prologue. */
975 if (full_value < 0)
976 {
977 /* Because (tmp_reg <- full_value) may be split into two
978 rtl patterns, we cannot set its RTX_FRAME_RELATED_P.
979 We need to construct another (sp <- sp + full_value)
980 and then insert it into sp_adjust_insn's reg note to
981 represent a frame related expression.
982 GCC knows how to refer to it and output debug information. */
983
984 rtx plus_rtx;
985 rtx set_rtx;
986
987 plus_rtx = plus_constant (Pmode, stack_pointer_rtx, full_value);
988 set_rtx = gen_rtx_SET (stack_pointer_rtx, plus_rtx);
989 add_reg_note (sp_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);
990
991 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
992 }
993
994 /* We have used an alternative way to adjust the stack pointer value.
995 Return zero so that prologue/epilogue
996 will not generate other instructions. */
997 return 0;
998 }
999 else
1000 {
1001 /* The value fits in the addi instruction.
1002 However, remember to make it a positive value
1003 because we want to return the 'adjustment' result. */
1004 adjust_value = (full_value < 0) ? (-full_value) : (full_value);
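/* For example, a full_value of -512 satisfies Is15, so 512 is returned
   and the caller emits the single addi itself, as described in the
   comment at the top of this function.  */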
1005
1006 return adjust_value;
1007 }
1008 }
1009
1010 /* Return true if MODE/TYPE need double word alignment. */
1011 static bool
1012 nds32_needs_double_word_align (machine_mode mode, const_tree type)
1013 {
1014 unsigned int align;
1015
1016 /* Pick up the alignment according to the mode or type. */
1017 align = NDS32_MODE_TYPE_ALIGN (mode, type);
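/* For example, a DImode argument typically has double-word (64-bit)
   alignment, which exceeds PARM_BOUNDARY (assumed here to be the 32-bit
   word boundary on this port), so it needs double-word alignment.  */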
1018
1019 return (align > PARM_BOUNDARY);
1020 }
1021
1022 /* Return true if FUNC is a naked function. */
1023 static bool
1024 nds32_naked_function_p (tree func)
1025 {
1026 tree t;
1027
1028 if (TREE_CODE (func) != FUNCTION_DECL)
1029 abort ();
1030
1031 t = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
1032
1033 return (t != NULL_TREE);
1034 }
1035
1036 /* Function that checks whether 'X' is a valid address register.
1037 The variable 'STRICT' is very important for deciding
1038 which register numbers are acceptable.
1039
1040 STRICT : true
1041 => We are in reload pass or after reload pass.
1042 The register number should be strictly limited in general registers.
1043
1044 STRICT : false
1045 => Before reload pass, we are free to use any register number. */
1046 static bool
1047 nds32_address_register_rtx_p (rtx x, bool strict)
1048 {
1049 int regno;
1050
1051 if (GET_CODE (x) != REG)
1052 return false;
1053
1054 regno = REGNO (x);
1055
1056 if (strict)
1057 return REGNO_OK_FOR_BASE_P (regno);
1058 else
1059 return true;
1060 }
1061
1062 /* Function that checks whether 'INDEX' is valid as an index rtx for an address.
1063 
1064 OUTER_MODE : Machine mode of the outer address rtx.
1065 INDEX : Check whether this rtx is valid as an index for an address.
1066 STRICT : If it is true, we are in the reload pass or after the reload pass. */
1067 static bool
1068 nds32_legitimate_index_p (machine_mode outer_mode,
1069 rtx index,
1070 bool strict)
1071 {
1072 int regno;
1073 rtx op0;
1074 rtx op1;
1075
1076 switch (GET_CODE (index))
1077 {
1078 case REG:
1079 regno = REGNO (index);
1080 /* If we are in reload pass or after reload pass,
1081 we need to limit it to general register. */
1082 if (strict)
1083 return REGNO_OK_FOR_INDEX_P (regno);
1084 else
1085 return true;
1086
1087 case CONST_INT:
1088 /* The alignment of the integer value is determined by 'outer_mode'. */
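/* For instance, a 1-byte access only needs the offset to satisfy Is15,
   while a 4-byte access additionally requires the offset to be
   word-aligned and to satisfy Is17, as the checks below show.  */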
1089 if (GET_MODE_SIZE (outer_mode) == 1)
1090 {
1091 /* Further check if the value is legal for the 'outer_mode'. */
1092 if (!satisfies_constraint_Is15 (index))
1093 return false;
1094
1095 /* Passed all tests; the value is valid, return true. */
1096 return true;
1097 }
1098 if (GET_MODE_SIZE (outer_mode) == 2
1099 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
1100 {
1101 /* Further check if the value is legal for the 'outer_mode'. */
1102 if (!satisfies_constraint_Is16 (index))
1103 return false;
1104
1105 /* Passed all tests; the value is valid, return true. */
1106 return true;
1107 }
1108 if (GET_MODE_SIZE (outer_mode) == 4
1109 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1110 {
1111 /* Further check if the value is legal for the 'outer_mode'. */
1112 if (!satisfies_constraint_Is17 (index))
1113 return false;
1114
1115 /* Passed all tests; the value is valid, return true. */
1116 return true;
1117 }
1118 if (GET_MODE_SIZE (outer_mode) == 8
1119 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1120 {
1121 /* Further check if the value is legal for the 'outer_mode'. */
1122 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
1123 SImode)))
1124 return false;
1125
1126 /* Passed all tests; the value is valid, return true. */
1127 return true;
1128 }
1129
1130 return false;
1131
1132 case MULT:
1133 op0 = XEXP (index, 0);
1134 op1 = XEXP (index, 1);
1135
1136 if (REG_P (op0) && CONST_INT_P (op1))
1137 {
1138 int multiplier;
1139 multiplier = INTVAL (op1);
1140
1141 /* We only allow (mult reg const_int_1)
1142 or (mult reg const_int_2) or (mult reg const_int_4). */
1143 if (multiplier != 1 && multiplier != 2 && multiplier != 4)
1144 return false;
1145
1146 regno = REGNO (op0);
1147 /* Limit it in general registers if we are
1148 in reload pass or after reload pass. */
1149 if (strict)
1150 return REGNO_OK_FOR_INDEX_P (regno);
1151 else
1152 return true;
1153 }
1154
1155 return false;
1156
1157 case ASHIFT:
1158 op0 = XEXP (index, 0);
1159 op1 = XEXP (index, 1);
1160
1161 if (REG_P (op0) && CONST_INT_P (op1))
1162 {
1163 int sv;
1164 /* op1 is already the sv value used for the left shift. */
1165 sv = INTVAL (op1);
1166
1167 /* We only allow (ashift reg const_int_0)
1168 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1169 if (sv != 0 && sv != 1 && sv != 2)
1170 return false;
1171
1172 regno = REGNO (op0);
1173 /* Limit it in general registers if we are
1174 in reload pass or after reload pass. */
1175 if (strict)
1176 return REGNO_OK_FOR_INDEX_P (regno);
1177 else
1178 return true;
1179 }
1180
1181 return false;
1182
1183 default:
1184 return false;
1185 }
1186 }
1187
1188 /* ------------------------------------------------------------------------ */
1189
1190 /* PART 3: Implement target hook stuff definitions. */
1191 \f
1192 /* Register Classes. */
1193
1194 static unsigned char
1195 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1196 machine_mode mode)
1197 {
1198 /* Return the maximum number of consecutive registers
1199 needed to represent "mode" in a register of "rclass". */
1200 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1201 }
1202
1203 static int
1204 nds32_register_priority (int hard_regno)
1205 {
1206 /* Encourage LRA to use r0-r7 when optimizing for size. */
1207 if (optimize_size && hard_regno < 8)
1208 return 4;
1209 return 3;
1210 }
1211
1212 \f
1213 /* Stack Layout and Calling Conventions. */
1214
1215 /* There are three kinds of pointer concepts used in the GCC compiler:
1216
1217 frame pointer: A pointer to the first location of local variables.
1218 stack pointer: A pointer to the top of a stack frame.
1219 argument pointer: A pointer to the incoming arguments.
1220
1221 In the nds32 target calling convention, we use 8-byte alignment.
1222 Besides, we would like each stack frame of a function to include:
1223
1224 [Block A]
1225 1. previous hard frame pointer
1226 2. return address
1227 3. callee-saved registers
1228 4. <padding bytes> (we will calculate this in nds32_compute_stack_frame()
1229 and save it at
1230 cfun->machine->callee_saved_area_padding_bytes)
1231
1232 [Block B]
1233 1. local variables
1234 2. spilling location
1235 3. <padding bytes> (it will be calculated by GCC itself)
1236 4. incoming arguments
1237 5. <padding bytes> (it will be calculated by GCC itself)
1238
1239 [Block C]
1240 1. <padding bytes> (it will be calculated by GCC itself)
1241 2. outgoing arguments
1242
1243 We 'wrap' these blocks together with
1244 hard frame pointer ($r28) and stack pointer ($r31).
1245 By applying the basic frame/stack/argument pointers concept,
1246 the layout of a stack frame should be like this:
1247
1248 | |
1249 old stack pointer -> ----
1250 | | \
1251 | | saved arguments for
1252 | | vararg functions
1253 | | /
1254 hard frame pointer -> --
1255 & argument pointer | | \
1256 | | previous hardware frame pointer
1257 | | return address
1258 | | callee-saved registers
1259 | | /
1260 frame pointer -> --
1261 | | \
1262 | | local variables
1263 | | and incoming arguments
1264 | | /
1265 --
1266 | | \
1267 | | outgoing
1268 | | arguments
1269 | | /
1270 stack pointer -> ----
1271
1272 $SFP and $AP are used to represent frame pointer and arguments pointer,
1273 which will be both eliminated as hard frame pointer. */
1274
1275 /* -- Eliminating Frame Pointer and Arg Pointer. */
1276
1277 static bool
1278 nds32_can_eliminate (const int from_reg, const int to_reg)
1279 {
1280 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1281 return true;
1282
1283 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1284 return true;
1285
1286 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1287 return true;
1288
1289 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1290 return true;
1291
1292 return false;
1293 }
1294
1295 /* -- Passing Arguments in Registers. */
1296
1297 static rtx
1298 nds32_function_arg (cumulative_args_t ca, machine_mode mode,
1299 const_tree type, bool named)
1300 {
1301 unsigned int regno;
1302 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1303
1304 /* The last time this hook is called,
1305 it is called with MODE == VOIDmode. */
1306 if (mode == VOIDmode)
1307 return NULL_RTX;
1308
1309 /* For nameless arguments, we need to take care of them individually. */
1310 if (!named)
1311 {
1312 /* If we are under the hard float abi, we have arguments passed on the
1313 stack and every situation can be handled by GCC itself. */
1314 if (TARGET_HARD_FLOAT)
1315 return NULL_RTX;
1316
1317 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1318 {
1319 /* If we still have enough registers to pass argument, pick up
1320 next available register number. */
1321 regno
1322 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1323 return gen_rtx_REG (mode, regno);
1324 }
1325
1326 /* No register available, return NULL_RTX.
1327 The compiler will use stack to pass argument instead. */
1328 return NULL_RTX;
1329 }
1330
1331 /* The following is to handle named arguments.
1332 Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
1333 are different. */
1334 if (TARGET_HARD_FLOAT)
1335 {
1336 /* Currently we have not implemented hard float yet. */
1337 gcc_unreachable ();
1338 }
1339 else
1340 {
1341 /* For the !TARGET_HARD_FLOAT calling convention, we always use GPRs to pass
1342 arguments. Since we allow passing an argument partially in registers,
1343 we can just return it if there are still registers available. */
1344 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1345 {
1346 /* Pick up the next available register number. */
1347 regno
1348 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1349 return gen_rtx_REG (mode, regno);
1350 }
1351
1352 }
1353
1354 /* No register available, return NULL_RTX.
1355 The compiler will use stack to pass argument instead. */
1356 return NULL_RTX;
1357 }
1358
1359 static bool
1360 nds32_must_pass_in_stack (machine_mode mode, const_tree type)
1361 {
1362 /* Return true if a type must be passed in memory.
1363 If it is NOT using the hard float abi, small aggregates can be
1364 passed in a register even if we are calling a variadic function.
1365 So there is no need to take padding into consideration. */
1366 if (TARGET_HARD_FLOAT)
1367 return must_pass_in_stack_var_size_or_pad (mode, type);
1368 else
1369 return must_pass_in_stack_var_size (mode, type);
1370 }
1371
1372 static int
1373 nds32_arg_partial_bytes (cumulative_args_t ca, machine_mode mode,
1374 tree type, bool named ATTRIBUTE_UNUSED)
1375 {
1376 /* Returns the number of bytes at the beginning of an argument that
1377 must be put in registers. The value must be zero for arguments that are
1378 passed entirely in registers or that are entirely pushed on the stack.
1379 Besides, TARGET_FUNCTION_ARG for these arguments should return the
1380 first register to be used by the caller for this argument. */
1381 unsigned int needed_reg_count;
1382 unsigned int remaining_reg_count;
1383 CUMULATIVE_ARGS *cum;
1384
1385 cum = get_cumulative_args (ca);
1386
1387 /* Under the hard float abi, we had better have the argument entirely passed
1388 in registers or entirely pushed on the stack so that we can reduce the
1389 complexity of dealing with cum->gpr_offset and cum->fpr_offset. */
1390 if (TARGET_HARD_FLOAT)
1391 return 0;
1392
1393 /* If we have already run out of argument registers, return zero
1394 so that the argument will be entirely pushed on the stack. */
1395 if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1396 >= NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS)
1397 return 0;
1398
1399 /* Calculate how many registers we need for this argument. */
1400 needed_reg_count = NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1401
1402 /* Calculate how many argument registers are left for passing this argument.
1403 Note that we should count from the next available register number. */
1404 remaining_reg_count
1405 = NDS32_MAX_GPR_REGS_FOR_ARGS
1406 - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1407 - NDS32_GPR_ARG_FIRST_REGNUM);
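/* For example, if only 2 argument registers remain but the argument
   needs 3 registers, 2 * UNITS_PER_WORD = 8 bytes are passed in
   registers and the rest goes onto the stack.  */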
1408
1409 /* Note that we have to return the number of bytes, not the register count. */
1410 if (needed_reg_count > remaining_reg_count)
1411 return remaining_reg_count * UNITS_PER_WORD;
1412
1413 return 0;
1414 }
1415
1416 static void
1417 nds32_function_arg_advance (cumulative_args_t ca, machine_mode mode,
1418 const_tree type, bool named)
1419 {
1420 machine_mode sub_mode;
1421 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1422
1423 if (named)
1424 {
1425 /* We need to further check TYPE and MODE so that we can determine
1426 which kind of register we shall advance. */
1427 if (type && TREE_CODE (type) == COMPLEX_TYPE)
1428 sub_mode = TYPE_MODE (TREE_TYPE (type));
1429 else
1430 sub_mode = mode;
1431
1432 /* Under hard float abi, we may advance FPR registers. */
1433 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (sub_mode) == MODE_FLOAT)
1434 {
1435 /* Currently we have not implemented hard float yet. */
1436 gcc_unreachable ();
1437 }
1438 else
1439 {
1440 cum->gpr_offset
1441 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1442 - NDS32_GPR_ARG_FIRST_REGNUM
1443 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1444 }
1445 }
1446 else
1447 {
1448 /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
1449 we can advance to the next register as well, so that the caller is
1450 able to pass arguments in registers and the callee is
1451 in charge of pushing all of them onto the stack. */
1452 if (!TARGET_HARD_FLOAT)
1453 {
1454 cum->gpr_offset
1455 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1456 - NDS32_GPR_ARG_FIRST_REGNUM
1457 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1458 }
1459 }
1460 }
1461
1462 static unsigned int
1463 nds32_function_arg_boundary (machine_mode mode, const_tree type)
1464 {
1465 return (nds32_needs_double_word_align (mode, type)
1466 ? NDS32_DOUBLE_WORD_ALIGNMENT
1467 : PARM_BOUNDARY);
1468 }
1469
1470 /* -- How Scalar Function Values Are Returned. */
1471
1472 static rtx
1473 nds32_function_value (const_tree ret_type,
1474 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1475 bool outgoing ATTRIBUTE_UNUSED)
1476 {
1477 machine_mode mode;
1478 int unsignedp;
1479
1480 mode = TYPE_MODE (ret_type);
1481 unsignedp = TYPE_UNSIGNED (ret_type);
1482
1483 mode = promote_mode (ret_type, mode, &unsignedp);
1484
1485 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1486 }
1487
1488 static rtx
1489 nds32_libcall_value (machine_mode mode,
1490 const_rtx fun ATTRIBUTE_UNUSED)
1491 {
1492 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1493 }
1494
1495 static bool
1496 nds32_function_value_regno_p (const unsigned int regno)
1497 {
1498 return (regno == NDS32_GPR_RET_FIRST_REGNUM);
1499 }
1500
1501 /* -- Function Entry and Exit. */
1502
1503 /* The content produced from this function
1504 will be placed before prologue body. */
1505 static void
1506 nds32_asm_function_prologue (FILE *file)
1507 {
1508 int r;
1509 const char *func_name;
1510 tree attrs;
1511 tree name;
1512
1513 /* All stack frame information is supposed to be
1514 already computed when expanding prologue.
1515 The result is in cfun->machine.
1516 DO NOT call nds32_compute_stack_frame() here
1517 because it may corrupt the essential information. */
1518
1519 fprintf (file, "\t! BEGIN PROLOGUE\n");
1520 fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
1521 fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
1522 fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
1523 fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);
1524
1525 /* Use df_regs_ever_live_p() to detect if the register
1526 is ever used in the current function. */
1527 fprintf (file, "\t! registers ever_live: ");
1528 for (r = 0; r < 32; r++)
1529 {
1530 if (df_regs_ever_live_p (r))
1531 fprintf (file, "%s, ", reg_names[r]);
1532 }
1533 fputc ('\n', file);
1534
1535 /* Display the attributes of this function. */
1536 fprintf (file, "\t! function attributes: ");
1537 /* Get the attributes tree list.
1538 Note that GCC builds the attributes list in reverse order. */
1539 attrs = DECL_ATTRIBUTES (current_function_decl);
1540
1541 /* If there are no attributes, print out "None". */
1542 if (!attrs)
1543 fprintf (file, "None");
1544
1545 /* If there are some attributes, check whether we need to
1546 construct isr vector information. */
1547 func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
1548 nds32_construct_isr_vectors_information (attrs, func_name);
1549
1550 /* Display all attributes of this function. */
1551 while (attrs)
1552 {
1553 name = TREE_PURPOSE (attrs);
1554 fprintf (file, "%s ", IDENTIFIER_POINTER (name));
1555
1556 /* Pick up the next attribute. */
1557 attrs = TREE_CHAIN (attrs);
1558 }
1559 fputc ('\n', file);
1560 }
1561
1562 /* After rtl prologue has been expanded, this function is used. */
1563 static void
1564 nds32_asm_function_end_prologue (FILE *file)
1565 {
1566 fprintf (file, "\t! END PROLOGUE\n");
1567
1568 /* If the frame pointer is NOT needed and -mfp-as-gp is issued,
1569 we can generate the special directive: ".omit_fp_begin"
1570 to guide the linker in doing fp-as-gp optimization.
1571 However, for a naked function, which means
1572 it should not have a prologue/epilogue,
1573 using fp-as-gp would still require saving $fp by push/pop behavior and
1574 there is no benefit to using fp-as-gp on such a small function.
1575 So we need to make sure this function is NOT naked as well. */
1576 if (!frame_pointer_needed
1577 && !cfun->machine->naked_p
1578 && cfun->machine->fp_as_gp_p)
1579 {
1580 fprintf (file, "\t! ----------------------------------------\n");
1581 fprintf (file, "\t! Guide linker to do "
1582 "link time optimization: fp-as-gp\n");
1583 fprintf (file, "\t! We add one more instruction to "
1584 "initialize $fp near to $gp location.\n");
1585 fprintf (file, "\t! If linker fails to use fp-as-gp transformation,\n");
1586 fprintf (file, "\t! this extra instruction should be "
1587 "eliminated at link stage.\n");
1588 fprintf (file, "\t.omit_fp_begin\n");
1589 fprintf (file, "\tla\t$fp,_FP_BASE_\n");
1590 fprintf (file, "\t! ----------------------------------------\n");
1591 }
1592 }
1593
1594 /* Before rtl epilogue has been expanded, this function is used. */
1595 static void
1596 nds32_asm_function_begin_epilogue (FILE *file)
1597 {
1598 /* If the frame pointer is NOT needed and -mfp-as-gp is issued,
1599 we can generate the special directive: ".omit_fp_end"
1600 to claim the fp-as-gp optimization range.
1601 However, for a naked function,
1602 which means it should not have a prologue/epilogue,
1603 using fp-as-gp would still require saving $fp by push/pop behavior and
1604 there is no benefit to using fp-as-gp on such a small function.
1605 So we need to make sure this function is NOT naked as well. */
1606 if (!frame_pointer_needed
1607 && !cfun->machine->naked_p
1608 && cfun->machine->fp_as_gp_p)
1609 {
1610 fprintf (file, "\t! ----------------------------------------\n");
1611 fprintf (file, "\t! Claim the range of fp-as-gp "
1612 "link time optimization\n");
1613 fprintf (file, "\t.omit_fp_end\n");
1614 fprintf (file, "\t! ----------------------------------------\n");
1615 }
1616
1617 fprintf (file, "\t! BEGIN EPILOGUE\n");
1618 }
1619
1620 /* The content produced from this function
1621 will be placed after epilogue body. */
1622 static void
1623 nds32_asm_function_epilogue (FILE *file)
1624 {
1625 fprintf (file, "\t! END EPILOGUE\n");
1626 }
1627
1628 static void
1629 nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
1630 HOST_WIDE_INT delta,
1631 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
1632 tree function)
1633 {
1634 int this_regno;
1635
1636 /* Make sure unwind info is emitted for the thunk if needed. */
1637 final_start_function (emit_barrier (), file, 1);
1638
1639 this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
1640 ? 1
1641 : 0);
1642
1643 if (delta != 0)
1644 {
1645 if (satisfies_constraint_Is15 (GEN_INT (delta)))
1646 {
1647 fprintf (file, "\taddi\t$r%d, $r%d, %ld\n",
1648 this_regno, this_regno, delta);
1649 }
1650 else if (satisfies_constraint_Is20 (GEN_INT (delta)))
1651 {
1652 fprintf (file, "\tmovi\t$ta, %ld\n", delta);
1653 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1654 }
1655 else
1656 {
1657 fprintf (file, "\tsethi\t$ta, hi20(%ld)\n", delta);
1658 fprintf (file, "\tori\t$ta, $ta, lo12(%ld)\n", delta);
1659 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1660 }
1661 }
1662
1663 fprintf (file, "\tb\t");
1664 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
1665 fprintf (file, "\n");
1666
1667 final_end_function ();
1668 }
1669
1670 /* -- Permitting tail calls. */
1671
1672 /* Determine whether we need to enable warning for function return check. */
1673 static bool
1674 nds32_warn_func_return (tree decl)
1675 {
1676 /* Naked functions are implemented entirely in assembly, including the
1677 return sequence, so suppress warnings about this. */
1678 return !nds32_naked_function_p (decl);
1679 }
1680
1681 \f
1682 /* Implementing the Varargs Macros. */
1683
1684 static void
1685 nds32_setup_incoming_varargs (cumulative_args_t ca,
1686 machine_mode mode,
1687 tree type,
1688 int *pretend_args_size,
1689 int second_time ATTRIBUTE_UNUSED)
1690 {
1691 unsigned int total_args_regs;
1692 unsigned int num_of_used_regs;
1693 unsigned int remaining_reg_count;
1694 CUMULATIVE_ARGS *cum;
1695
1696 /* If we are under the hard float abi, we do not need to set *pretend_args_size,
1697 so that all nameless arguments are pushed by the caller and every situation
1698 can be handled by GCC itself. */
1699 if (TARGET_HARD_FLOAT)
1700 return;
1701
1702 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
1703 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
1704 However, for nameless (anonymous) arguments, we should push them on the
1705 stack so that all the nameless arguments appear to have been passed
1706 consecutively in the memory for accessing. Hence, we need to check and
1707 exclude the registers that are used for named arguments. */
1708
1709 cum = get_cumulative_args (ca);
1710
1711 /* The MODE and TYPE describe the last argument.
1712 We need that information to determine the remaining registers
1713 for varargs. */
1714 total_args_regs
1715 = NDS32_MAX_GPR_REGS_FOR_ARGS + NDS32_GPR_ARG_FIRST_REGNUM;
1716 num_of_used_regs
1717 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1718 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1719
1720 remaining_reg_count = total_args_regs - num_of_used_regs;
1721 *pretend_args_size = remaining_reg_count * UNITS_PER_WORD;
1722
1723 return;
1724 }
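/* Informal sketch of the computation above (simplified; the exact macros
   are defined in nds32.h): if the named arguments consume K of the
   NDS32_MAX_GPR_REGS_FOR_ARGS argument registers, then roughly
       remaining_reg_count = NDS32_MAX_GPR_REGS_FOR_ARGS - K
       *pretend_args_size  = remaining_reg_count * UNITS_PER_WORD
   i.e. the callee pretends its caller pushed the unnamed-argument
   registers just below the incoming named arguments. */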
1725
1726 static bool
1727 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1728 {
1729 /* If this hook returns true, the 'named' argument passed to FUNCTION_ARG
1730 is always true for named arguments and false for unnamed arguments. */
1731 return true;
1732 }
1733
1734 \f
1735 /* Trampolines for Nested Functions. */
1736
1737 static void
1738 nds32_asm_trampoline_template (FILE *f)
1739 {
1740 if (TARGET_REDUCED_REGS)
1741 {
1742 /* Trampoline is not supported on reduced-set registers yet. */
1743 sorry ("a nested function is not supported for reduced registers");
1744 }
1745 else
1746 {
1747 asm_fprintf (f, "\t! Trampoline code template\n");
1748 asm_fprintf (f, "\t! This code fragment will be copied "
1749 "into stack on demand\n");
1750
1751 asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
1752 asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
1753 "! load nested function address\n");
1754 asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
1755 "! load chain_value\n");
1756 asm_fprintf (f, "\tjr\t$r15\n");
1757 }
1758
1759 /* Preserve space ($pc + 16) for saving chain_value;
1760 nds32_trampoline_init will fill in the value in this slot. */
1761 asm_fprintf (f, "\t! space for saving chain_value\n");
1762 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1763
1764 /* Preserve space ($pc + 20) for saving the nested function address;
1765 nds32_trampoline_init will fill in the value in this slot. */
1766 asm_fprintf (f, "\t! space for saving nested function address\n");
1767 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1768 }
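/* For reference, the resulting trampoline layout on the stack is
   (offsets relative to the start of the trampoline):
       0 .. 15 : the four instructions emitted above
       16      : slot for chain_value (filled by nds32_trampoline_init)
       20      : slot for the nested function address (same)
   which matches the [$r16 + 16] and [$r16 + 20] loads in the template. */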
1769
1770 /* Emit RTL insns to initialize the variable parts of a trampoline. */
1771 static void
1772 nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
1773 {
1774 int i;
1775
1776 /* Nested function address. */
1777 rtx fnaddr;
1778 /* The memory rtx that is going to
1779 be filled with chain_value. */
1780 rtx chain_value_mem;
1781 /* The memory rtx that is going to
1782 be filled with nested function address. */
1783 rtx nested_func_mem;
1784
1785 /* Start address of trampoline code in stack, for doing cache sync. */
1786 rtx sync_cache_addr;
1787 /* Temporary register for sync instruction. */
1788 rtx tmp_reg;
1789 /* Instruction-cache sync instruction,
1790 which takes the starting address as its argument. */
1791 rtx isync_insn;
1792 /* For convenience when doing the comparison below. */
1793 int tramp_align_in_bytes;
1794
1795 /* Trampoline is not supported on reduced-set registers yet. */
1796 if (TARGET_REDUCED_REGS)
1797 sorry ("a nested function is not supported for reduced registers");
1798
1799 /* STEP 1: Copy trampoline code template into stack,
1800 fill up essential data into stack. */
1801
1802 /* Extract nested function address rtx. */
1803 fnaddr = XEXP (DECL_RTL (fndecl), 0);
1804
1805 /* m_tramp is memory rtx that is going to be filled with trampoline code.
1806 We have nds32_asm_trampoline_template() to emit template pattern. */
1807 emit_block_move (m_tramp, assemble_trampoline_template (),
1808 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
1809
1810 /* After copying trampoline code into stack,
1811 fill chain_value into stack. */
1812 chain_value_mem = adjust_address (m_tramp, SImode, 16);
1813 emit_move_insn (chain_value_mem, chain_value);
1814 /* After copying trampoline code into stack,
1815 fill nested function address into stack. */
1816 nested_func_mem = adjust_address (m_tramp, SImode, 20);
1817 emit_move_insn (nested_func_mem, fnaddr);
1818
1819 /* STEP 2: Sync instruction-cache. */
1820
1821 /* We have successfully filled the trampoline code into the stack.
1822 However, in order to execute code on the stack correctly,
1823 we must sync the instruction cache. */
1824 sync_cache_addr = XEXP (m_tramp, 0);
1825 tmp_reg = gen_reg_rtx (SImode);
1826 isync_insn = gen_unspec_volatile_isync (tmp_reg);
1827
1828 /* Because nds32_cache_block_size is in bytes,
1829 we get trampoline alignment in bytes for convenient comparison. */
1830 tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;
1831
1832 if (tramp_align_in_bytes >= nds32_cache_block_size
1833 && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
1834 {
1835 /* Under this condition, the starting address of trampoline
1836 must be aligned to the starting address of each cache block
1837 and we do not have to worry about cross-boundary issue. */
1838 for (i = 0;
1839 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1840 / nds32_cache_block_size;
1841 i++)
1842 {
1843 emit_move_insn (tmp_reg,
1844 plus_constant (Pmode, sync_cache_addr,
1845 nds32_cache_block_size * i));
1846 emit_insn (isync_insn);
1847 }
1848 }
1849 else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
1850 {
1851 /* The starting address of the trampoline code
1852 may not be aligned to a cache block boundary,
1853 so the trampoline code may span two cache blocks.
1854 We also need to sync the last element, which is 4 bytes in size,
1855 of the trampoline template. */
1856 for (i = 0;
1857 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1858 / nds32_cache_block_size;
1859 i++)
1860 {
1861 emit_move_insn (tmp_reg,
1862 plus_constant (Pmode, sync_cache_addr,
1863 nds32_cache_block_size * i));
1864 emit_insn (isync_insn);
1865 }
1866
1867 /* The last element of trampoline template is 4-byte size. */
1868 emit_move_insn (tmp_reg,
1869 plus_constant (Pmode, sync_cache_addr,
1870 TRAMPOLINE_SIZE - 4));
1871 emit_insn (isync_insn);
1872 }
1873 else
1874 {
1875 /* This is the simplest case.
1876 Because TRAMPOLINE_SIZE is less than or
1877 equal to nds32_cache_block_size,
1878 we can just sync start address and
1879 the last element of trampoline code. */
1880
1881 /* Sync starting address of trampoline code. */
1882 emit_move_insn (tmp_reg, sync_cache_addr);
1883 emit_insn (isync_insn);
1884 /* Sync the last element, which is 4 bytes in size,
1885 of the trampoline template. */
1886 emit_move_insn (tmp_reg,
1887 plus_constant (Pmode, sync_cache_addr,
1888 TRAMPOLINE_SIZE - 4));
1889 emit_insn (isync_insn);
1890 }
1891
1892 /* Set instruction serialization barrier
1893 to guarantee the correct operations. */
1894 emit_insn (gen_unspec_volatile_isb ());
1895 }
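/* Cache-sync sketch under assumed numbers (purely illustrative): with a
   16-byte cache block and a 24-byte TRAMPOLINE_SIZE, the second branch
   above issues isync for offsets 0 and 16 and then once more at
   TRAMPOLINE_SIZE - 4, so a trampoline that straddles a block boundary
   is still fully synchronized before the final isb barrier. */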
1896
1897 \f
1898 /* Addressing Modes. */
1899
1900 static bool
1901 nds32_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1902 {
1903 /* For (mem:DI addr) or (mem:DF addr) case,
1904 we only allow 'addr' to be [reg], [symbol_ref],
1905 [const], or [reg + const_int] pattern. */
1906 if (mode == DImode || mode == DFmode)
1907 {
1908 /* Allow [Reg + const_int] addressing mode. */
1909 if (GET_CODE (x) == PLUS)
1910 {
1911 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
1912 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
1913 && CONST_INT_P (XEXP (x, 1)))
1914 return true;
1915 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
1916 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
1917 && CONST_INT_P (XEXP (x, 0)))
1918 return true;
1919 }
1920
1921 /* Now check [reg], [symbol_ref], and [const]. */
1922 if (GET_CODE (x) != REG
1923 && GET_CODE (x) != SYMBOL_REF
1924 && GET_CODE (x) != CONST)
1925 return false;
1926 }
1927
1928 /* Check if 'x' is a valid address. */
1929 switch (GET_CODE (x))
1930 {
1931 case REG:
1932 /* (mem (reg A)) => [Ra] */
1933 return nds32_address_register_rtx_p (x, strict);
1934
1935 case SYMBOL_REF:
1936 /* (mem (symbol_ref A)) => [symbol_ref] */
1937 /* If -mcmodel=large, the 'symbol_ref' is not a valid address
1938 during or after LRA/reload phase. */
1939 if (TARGET_CMODEL_LARGE
1940 && (reload_completed
1941 || reload_in_progress
1942 || lra_in_progress))
1943 return false;
1944 /* If -mcmodel=medium and the symbol refers to the rodata section,
1945 the 'symbol_ref' is not a valid address during or after
1946 LRA/reload phase. */
1947 if (TARGET_CMODEL_MEDIUM
1948 && NDS32_SYMBOL_REF_RODATA_P (x)
1949 && (reload_completed
1950 || reload_in_progress
1951 || lra_in_progress))
1952 return false;
1953
1954 return true;
1955
1956 case CONST:
1957 /* (mem (const (...)))
1958 => [ + const_addr ], where const_addr = symbol_ref + const_int */
1959 if (GET_CODE (XEXP (x, 0)) == PLUS)
1960 {
1961 rtx plus_op = XEXP (x, 0);
1962
1963 rtx op0 = XEXP (plus_op, 0);
1964 rtx op1 = XEXP (plus_op, 1);
1965
1966 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
1967 {
1968 /* Now we see the [ + const_addr ] pattern, but we need
1969 some further checking. */
1970 /* If -mcmodel=large, the 'const_addr' is not a valid address
1971 during or after LRA/reload phase. */
1972 if (TARGET_CMODEL_LARGE
1973 && (reload_completed
1974 || reload_in_progress
1975 || lra_in_progress))
1976 return false;
1977 /* If -mcmodel=medium and the symbol refers to the rodata section,
1978 the 'const_addr' is not a valid address during or after
1979 LRA/reload phase. */
1980 if (TARGET_CMODEL_MEDIUM
1981 && NDS32_SYMBOL_REF_RODATA_P (op0)
1982 && (reload_completed
1983 || reload_in_progress
1984 || lra_in_progress))
1985 return false;
1986
1987 /* At this point we can make sure 'const_addr' is a
1988 valid address. */
1989 return true;
1990 }
1991 }
1992
1993 return false;
1994
1995 case POST_MODIFY:
1996 /* (mem (post_modify (reg) (plus (reg) (reg))))
1997 => [Ra], Rb */
1998 /* (mem (post_modify (reg) (plus (reg) (const_int))))
1999 => [Ra], const_int */
2000 if (GET_CODE (XEXP (x, 0)) == REG
2001 && GET_CODE (XEXP (x, 1)) == PLUS)
2002 {
2003 rtx plus_op = XEXP (x, 1);
2004
2005 rtx op0 = XEXP (plus_op, 0);
2006 rtx op1 = XEXP (plus_op, 1);
2007
2008 if (nds32_address_register_rtx_p (op0, strict)
2009 && nds32_legitimate_index_p (mode, op1, strict))
2010 return true;
2011 else
2012 return false;
2013 }
2014
2015 return false;
2016
2017 case POST_INC:
2018 case POST_DEC:
2019 /* (mem (post_inc reg)) => [Ra], 1/2/4 */
2020 /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
2021 /* The 1/2/4 or -1/-2/-4 increments are handled in nds32.md.
2022 We only need to deal with register Ra. */
2023 if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
2024 return true;
2025 else
2026 return false;
2027
2028 case PLUS:
2029 /* (mem (plus reg const_int))
2030 => [Ra + imm] */
2031 /* (mem (plus reg reg))
2032 => [Ra + Rb] */
2033 /* (mem (plus (mult reg const_int) reg))
2034 => [Ra + Rb << sv] */
2035 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
2036 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
2037 return true;
2038 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
2039 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
2040 return true;
2041 else
2042 return false;
2043
2044 case LO_SUM:
2045 /* (mem (lo_sum (reg) (symbol_ref))) */
2046 /* (mem (lo_sum (reg) (const))) */
2047 gcc_assert (REG_P (XEXP (x, 0)));
2048 if (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
2049 || GET_CODE (XEXP (x, 1)) == CONST)
2050 return nds32_legitimate_address_p (mode, XEXP (x, 1), strict);
2051 else
2052 return false;
2053
2054 default:
2055 return false;
2056 }
2057 }
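/* Summary of the address forms accepted above (informal, for quick
   reference):
     [Ra]                                REG
     [ + symbol_ref], [ + const_addr]    SYMBOL_REF / CONST (cmodel permitting)
     [Ra + imm], [Ra + Rb]               PLUS
     [Ra + Rb << sv]                     PLUS with MULT index
     [Ra], Rb  /  [Ra], imm              POST_MODIFY
     [Ra], 1/2/4 or -1/-2/-4             POST_INC / POST_DEC
     lo_sum of a register and symbol/const  LO_SUM
   DImode/DFmode accesses are restricted to the [reg], [symbol_ref],
   [const], and [reg + const_int] subset. */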
2058
2059 \f
2060 /* Describing Relative Costs of Operations. */
2061
2062 static int
2063 nds32_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2064 reg_class_t from,
2065 reg_class_t to)
2066 {
2067 if (from == HIGH_REGS || to == HIGH_REGS)
2068 return 6;
2069
2070 return 2;
2071 }
2072
2073 static int
2074 nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2075 reg_class_t rclass ATTRIBUTE_UNUSED,
2076 bool in ATTRIBUTE_UNUSED)
2077 {
2078 return 8;
2079 }
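/* Taken together, these hooks tell the register allocator that a
   register-to-register move costs 2 (or 6 when HIGH_REGS is involved)
   while any memory move costs 8, so values are kept in registers
   whenever reasonably possible. */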
2080
2081 /* This target hook describes the relative costs of RTL expressions.
2082 Return 'true' when all subexpressions of x have been processed.
2083 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
2084 Refer to gcc/rtlanal.c for more information. */
2085 static bool
2086 nds32_rtx_costs (rtx x,
2087 machine_mode mode,
2088 int outer_code,
2089 int opno,
2090 int *total,
2091 bool speed)
2092 {
2093 return nds32_rtx_costs_impl (x, mode, outer_code, opno, total, speed);
2094 }
2095
2096 static int
2097 nds32_address_cost (rtx address,
2098 machine_mode mode,
2099 addr_space_t as,
2100 bool speed)
2101 {
2102 return nds32_address_cost_impl (address, mode, as, speed);
2103 }
2104
2105 \f
2106 /* Dividing the Output into Sections (Texts, Data, . . . ). */
2107
2108 /* If references to a symbol or a constant must be treated differently
2109 depending on something about the variable or function named by the symbol
2110 (such as what section it is in), we use this hook to store flags
2111 in symbol_ref rtx. */
2112 static void
2113 nds32_encode_section_info (tree decl, rtx rtl, int new_decl_p)
2114 {
2115 default_encode_section_info (decl, rtl, new_decl_p);
2116
2117 /* For the memory rtx, if it refers to the rodata section, we can store
2118 NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
2119 nds32_legitimate_address_p() can determine how to treat such symbol_ref
2120 based on -mcmodel=X and this information. */
2121 if (MEM_P (rtl) && MEM_READONLY_P (rtl))
2122 {
2123 rtx addr = XEXP (rtl, 0);
2124
2125 if (GET_CODE (addr) == SYMBOL_REF)
2126 {
2127 /* For (mem (symbol_ref X)) case. */
2128 SYMBOL_REF_FLAGS (addr) |= NDS32_SYMBOL_FLAG_RODATA;
2129 }
2130 else if (GET_CODE (addr) == CONST
2131 && GET_CODE (XEXP (addr, 0)) == PLUS)
2132 {
2133 /* For (mem (const (plus (symbol_ref X) (const_int N)))) case. */
2134 rtx plus_op = XEXP (addr, 0);
2135 rtx op0 = XEXP (plus_op, 0);
2136 rtx op1 = XEXP (plus_op, 1);
2137
2138 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
2139 SYMBOL_REF_FLAGS (op0) |= NDS32_SYMBOL_FLAG_RODATA;
2140 }
2141 }
2142 }
2143
2144 \f
2145 /* Defining the Output Assembler Language. */
2146
2147 /* -- The Overall Framework of an Assembler File. */
2148
2149 static void
2150 nds32_asm_file_start (void)
2151 {
2152 default_file_start ();
2153
2154 /* Tell assembler which ABI we are using. */
2155 fprintf (asm_out_file, "\t! ABI version\n");
2156 fprintf (asm_out_file, "\t.abi_2\n");
2157
2158 /* Tell assembler that this asm code is generated by compiler. */
2159 fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
2160 fprintf (asm_out_file, "\t.flag\tverbatim\n");
2161 /* Give assembler the size of each vector for interrupt handler. */
2162 fprintf (asm_out_file, "\t! This vector size directive is required "
2163 "for checking inconsistency on interrupt handler\n");
2164 fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);
2165
2166 fprintf (asm_out_file, "\t! ------------------------------------\n");
2167
2168 if (TARGET_ISA_V2)
2169 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
2170 if (TARGET_ISA_V3)
2171 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
2172 if (TARGET_ISA_V3M)
2173 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");
2174
2175 if (TARGET_CMODEL_SMALL)
2176 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "SMALL");
2177 if (TARGET_CMODEL_MEDIUM)
2178 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "MEDIUM");
2179 if (TARGET_CMODEL_LARGE)
2180 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "LARGE");
2181
2182 fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
2183 ((TARGET_BIG_ENDIAN) ? "big-endian"
2184 : "little-endian"));
2185
2186 fprintf (asm_out_file, "\t! ------------------------------------\n");
2187
2188 fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
2189 ((TARGET_CMOV) ? "Yes"
2190 : "No"));
2191 fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
2192 ((TARGET_EXT_PERF) ? "Yes"
2193 : "No"));
2194 fprintf (asm_out_file, "\t! Use performance extension 2\t: %s\n",
2195 ((TARGET_EXT_PERF2) ? "Yes"
2196 : "No"));
2197 fprintf (asm_out_file, "\t! Use string extension\t\t: %s\n",
2198 ((TARGET_EXT_STRING) ? "Yes"
2199 : "No"));
2200
2201 fprintf (asm_out_file, "\t! ------------------------------------\n");
2202
2203 fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
2204 ((TARGET_V3PUSH) ? "Yes"
2205 : "No"));
2206 fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
2207 ((TARGET_16_BIT) ? "Yes"
2208 : "No"));
2209 fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
2210 ((TARGET_REDUCED_REGS) ? "Yes"
2211 : "No"));
2212
2213 fprintf (asm_out_file, "\t! ------------------------------------\n");
2214
2215 if (optimize_size)
2216 fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
2217 else
2218 fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);
2219
2220 fprintf (asm_out_file, "\t! ------------------------------------\n");
2221
2222 fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
2223 nds32_cache_block_size);
2224
2225 fprintf (asm_out_file, "\t! ------------------------------------\n");
2226
2227 nds32_asm_file_start_for_isr ();
2228 }
2229
2230 static void
2231 nds32_asm_file_end (void)
2232 {
2233 nds32_asm_file_end_for_isr ();
2234
2235 fprintf (asm_out_file, "\t! ------------------------------------\n");
2236 }
2237
2238 /* -- Output and Generation of Labels. */
2239
2240 static void
2241 nds32_asm_globalize_label (FILE *stream, const char *name)
2242 {
2243 fputs ("\t.global\t", stream);
2244 assemble_name (stream, name);
2245 fputs ("\n", stream);
2246 }
2247
2248 /* -- Output of Assembler Instructions. */
2249
2250 static void
2251 nds32_print_operand (FILE *stream, rtx x, int code)
2252 {
2253 int op_value;
2254
2255 switch (code)
2256 {
2257 case 0 :
2258 /* Do nothing special. */
2259 break;
2260
2261 case 'V':
2262 /* 'x' is supposed to be CONST_INT, get the value. */
2263 gcc_assert (CONST_INT_P (x));
2264 op_value = INTVAL (x);
2265
2266 /* According to the Andes architecture,
2267 the system/user register index range is 0 ~ 1023.
2268 In order to avoid conflict between user-specified-integer value
2269 and enum-specified-register value,
2270 the 'enum nds32_intrinsic_registers' value
2271 in nds32_intrinsic.h starts from 1024. */
2272 if (op_value < 1024 && op_value >= 0)
2273 {
2274 /* If user gives integer value directly (0~1023),
2275 we just print out the value. */
2276 fprintf (stream, "%d", op_value);
2277 }
2278 else if (op_value < 0
2279 || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
2280 + 1024))
2281 {
2282 /* The enum index value for array size is out of range. */
2283 error ("intrinsic register index is out of range");
2284 }
2285 else
2286 {
2287 /* If the user uses the __NDS32_REG_XXX__ enum values,
2288 we can print out the register name. Remember to subtract 1024. */
2289 fprintf (stream, "%s",
2290 nds32_intrinsic_register_names[op_value - 1024]);
2291 }
2292
2293 /* No further processing is needed, so return immediately. */
2294 return;
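      /* Illustrative examples for the 'V' code (values are hypothetical):
         op_value == 37 simply prints "37", while op_value == 1024 prints
         the first entry of nds32_intrinsic_register_names, i.e. "$PSW". */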
2295
2296 default :
2297 /* Unknown flag. */
2298 output_operand_lossage ("invalid operand output code");
2299 break;
2300 }
2301
2302 switch (GET_CODE (x))
2303 {
2304 case LABEL_REF:
2305 case SYMBOL_REF:
2306 output_addr_const (stream, x);
2307 break;
2308
2309 case REG:
2310 /* Forbid using static chain register ($r16)
2311 on reduced-set registers configuration. */
2312 if (TARGET_REDUCED_REGS
2313 && REGNO (x) == STATIC_CHAIN_REGNUM)
2314 sorry ("a nested function is not supported for reduced registers");
2315
2316 /* Normal cases, print out register name. */
2317 fputs (reg_names[REGNO (x)], stream);
2318 break;
2319
2320 case MEM:
2321 output_address (GET_MODE (x), XEXP (x, 0));
2322 break;
2323
2324 case CODE_LABEL:
2325 case CONST_INT:
2326 case CONST:
2327 output_addr_const (stream, x);
2328 break;
2329
2330 default:
2331 /* Generally, output_addr_const () is able to handle most cases.
2332 We want to see what CODE could appear,
2333 so we use gcc_unreachable() to stop it. */
2334 debug_rtx (x);
2335 gcc_unreachable ();
2336 break;
2337 }
2338 }
2339
2340 static void
2341 nds32_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x)
2342 {
2343 rtx op0, op1;
2344
2345 switch (GET_CODE (x))
2346 {
2347 case SYMBOL_REF:
2348 case CONST:
2349 /* [ + symbol_ref] */
2350 /* [ + const_addr], where const_addr = symbol_ref + const_int */
2351 fputs ("[ + ", stream);
2352 output_addr_const (stream, x);
2353 fputs ("]", stream);
2354 break;
2355
2356 case REG:
2357 /* Forbid using static chain register ($r16)
2358 on reduced-set registers configuration. */
2359 if (TARGET_REDUCED_REGS
2360 && REGNO (x) == STATIC_CHAIN_REGNUM)
2361 sorry ("a nested function is not supported for reduced registers");
2362
2363 /* [Ra] */
2364 fprintf (stream, "[%s]", reg_names[REGNO (x)]);
2365 break;
2366
2367 case PLUS:
2368 op0 = XEXP (x, 0);
2369 op1 = XEXP (x, 1);
2370
2371 /* Checking op0, forbid using static chain register ($r16)
2372 on reduced-set registers configuration. */
2373 if (TARGET_REDUCED_REGS
2374 && REG_P (op0)
2375 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2376 sorry ("a nested function is not supported for reduced registers");
2377 /* Checking op1, forbid using static chain register ($r16)
2378 on reduced-set registers configuration. */
2379 if (TARGET_REDUCED_REGS
2380 && REG_P (op1)
2381 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2382 sorry ("a nested function is not supported for reduced registers");
2383
2384 if (REG_P (op0) && CONST_INT_P (op1))
2385 {
2386 /* [Ra + imm] */
2387 fprintf (stream, "[%s + (%d)]",
2388 reg_names[REGNO (op0)], (int)INTVAL (op1));
2389 }
2390 else if (REG_P (op0) && REG_P (op1))
2391 {
2392 /* [Ra + Rb] */
2393 fprintf (stream, "[%s + %s]",
2394 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2395 }
2396 else if (GET_CODE (op0) == MULT && REG_P (op1))
2397 {
2398 /* [Ra + Rb << sv]
2399 From observation, the pattern looks like:
2400 (plus:SI (mult:SI (reg:SI 58)
2401 (const_int 4 [0x4]))
2402 (reg/f:SI 57)) */
2403 int sv;
2404
2405 /* We need to set sv to output shift value. */
2406 if (INTVAL (XEXP (op0, 1)) == 1)
2407 sv = 0;
2408 else if (INTVAL (XEXP (op0, 1)) == 2)
2409 sv = 1;
2410 else if (INTVAL (XEXP (op0, 1)) == 4)
2411 sv = 2;
2412 else
2413 gcc_unreachable ();
2414
2415 fprintf (stream, "[%s + %s << %d]",
2416 reg_names[REGNO (op1)],
2417 reg_names[REGNO (XEXP (op0, 0))],
2418 sv);
2419 }
2420 else
2421 {
2422 /* The control flow is not supposed to be here. */
2423 debug_rtx (x);
2424 gcc_unreachable ();
2425 }
2426
2427 break;
2428
2429 case POST_MODIFY:
2430 /* (post_modify (regA) (plus (regA) (regB)))
2431 (post_modify (regA) (plus (regA) (const_int)))
2432 We would like to extract
2433 regA and regB (or const_int) from plus rtx. */
2434 op0 = XEXP (XEXP (x, 1), 0);
2435 op1 = XEXP (XEXP (x, 1), 1);
2436
2437 /* Checking op0, forbid using static chain register ($r16)
2438 on reduced-set registers configuration. */
2439 if (TARGET_REDUCED_REGS
2440 && REG_P (op0)
2441 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2442 sorry ("a nested function is not supported for reduced registers");
2443 /* Checking op1, forbid using static chain register ($r16)
2444 on reduced-set registers configuration. */
2445 if (TARGET_REDUCED_REGS
2446 && REG_P (op1)
2447 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2448 sorry ("a nested function is not supported for reduced registers");
2449
2450 if (REG_P (op0) && REG_P (op1))
2451 {
2452 /* [Ra], Rb */
2453 fprintf (stream, "[%s], %s",
2454 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2455 }
2456 else if (REG_P (op0) && CONST_INT_P (op1))
2457 {
2458 /* [Ra], imm */
2459 fprintf (stream, "[%s], %d",
2460 reg_names[REGNO (op0)], (int)INTVAL (op1));
2461 }
2462 else
2463 {
2464 /* The control flow is not supposed to be here. */
2465 debug_rtx (x);
2466 gcc_unreachable ();
2467 }
2468
2469 break;
2470
2471 case POST_INC:
2472 case POST_DEC:
2473 op0 = XEXP (x, 0);
2474
2475 /* Checking op0, forbid using static chain register ($r16)
2476 on reduced-set registers configuration. */
2477 if (TARGET_REDUCED_REGS
2478 && REG_P (op0)
2479 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2480 sorry ("a nested function is not supported for reduced registers");
2481
2482 if (REG_P (op0))
2483 {
2484 /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
2485 The 1/2/4 or -1/-2/-4 amounts are printed by the templates in nds32.md.
2486 We only need to deal with register Ra. */
2487 fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
2488 }
2489 else
2490 {
2491 /* The control flow is not supposed to be here. */
2492 debug_rtx (x);
2493 gcc_unreachable ();
2494 }
2495
2496 break;
2497
2498 default :
2499 /* Generally, output_addr_const () is able to handle most cases.
2500 We want to see what CODE could appear,
2501 so we use gcc_unreachable() to stop it. */
2502 debug_rtx (x);
2503 gcc_unreachable ();
2504 break;
2505 }
2506 }
2507
2508 \f
2509 /* Defining target-specific uses of __attribute__. */
2510
2511 /* Add some checking after merging attributes. */
2512 static tree
2513 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
2514 {
2515 tree combined_attrs;
2516
2517 /* Create combined attributes. */
2518 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
2519 DECL_ATTRIBUTES (newdecl));
2520
2521 /* Since newdecl is actually a duplicate of olddecl,
2522 we can take olddecl for some operations. */
2523 if (TREE_CODE (olddecl) == FUNCTION_DECL)
2524 {
2525 /* Check isr-specific attributes conflict. */
2526 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
2527 }
2528
2529 return combined_attrs;
2530 }
2531
2532 /* Add some checking when inserting attributes. */
2533 static void
2534 nds32_insert_attributes (tree decl, tree *attributes)
2535 {
2536 /* For function declaration, we need to check isr-specific attributes:
2537 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
2538 2. Check valid integer value for interrupt/exception.
2539 3. Check valid integer value for reset.
2540 4. Check valid function for nmi/warm. */
2541 if (TREE_CODE (decl) == FUNCTION_DECL)
2542 {
2543 tree func_attrs;
2544 tree intr, excp, reset;
2545
2546 /* Pick up function attributes. */
2547 func_attrs = *attributes;
2548
2549 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
2550 nds32_check_isr_attrs_conflict (decl, func_attrs);
2551
2552 /* Now we are starting to check valid id value
2553 for interrupt/exception/reset.
2554 Note that we ONLY check its validity here.
2555 Constructing the ISR vector information is still performed
2556 by nds32_construct_isr_vectors_information(). */
2557 intr = lookup_attribute ("interrupt", func_attrs);
2558 excp = lookup_attribute ("exception", func_attrs);
2559 reset = lookup_attribute ("reset", func_attrs);
2560
2561 if (intr || excp)
2562 {
2563 /* Deal with interrupt/exception. */
2564 tree id_list;
2565 unsigned int lower_bound, upper_bound;
2566
2567 /* The way to handle interrupt or exception is the same,
2568 we just need to take care of actual vector number.
2569 For interrupt(0..63), the actual vector number is (9..72).
2570 For exception(1..8), the actual vector number is (1..8). */
2571 lower_bound = (intr) ? (0) : (1);
2572 upper_bound = (intr) ? (63) : (8);
2573
2574 /* Prepare id list so that we can traverse id value. */
2575 id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));
2576
2577 /* 2. Check valid integer value for interrupt/exception. */
2578 while (id_list)
2579 {
2580 tree id;
2581
2582 /* Pick up each vector id value. */
2583 id = TREE_VALUE (id_list);
2584 /* Issue error if it is not a valid integer value. */
2585 if (TREE_CODE (id) != INTEGER_CST
2586 || wi::ltu_p (wi::to_wide (id), lower_bound)
2587 || wi::gtu_p (wi::to_wide (id), upper_bound))
2588 error ("invalid id value for interrupt/exception attribute");
2589
2590 /* Advance to next id. */
2591 id_list = TREE_CHAIN (id_list);
2592 }
2593 }
2594 else if (reset)
2595 {
2596 /* Deal with reset. */
2597 tree id_list;
2598 tree id;
2599 tree nmi, warm;
2600 unsigned int lower_bound;
2601 unsigned int upper_bound;
2602
2603 /* Prepare id_list and identify id value so that
2604 we can check if total number of vectors is valid. */
2605 id_list = TREE_VALUE (reset);
2606 id = TREE_VALUE (id_list);
2607
2608 /* The maximum number of user interrupts is 64. */
2609 lower_bound = 0;
2610 upper_bound = 64;
2611
2612 /* 3. Check valid integer value for reset. */
2613 if (TREE_CODE (id) != INTEGER_CST
2614 || wi::ltu_p (wi::to_wide (id), lower_bound)
2615 || wi::gtu_p (wi::to_wide (id), upper_bound))
2616 error ("invalid id value for reset attribute");
2617
2618 /* 4. Check valid function for nmi/warm. */
2619 nmi = lookup_attribute ("nmi", func_attrs);
2620 warm = lookup_attribute ("warm", func_attrs);
2621
2622 if (nmi != NULL_TREE)
2623 {
2624 tree nmi_func_list;
2625 tree nmi_func;
2626
2627 nmi_func_list = TREE_VALUE (nmi);
2628 nmi_func = TREE_VALUE (nmi_func_list);
2629
2630 /* Issue error if it is not a valid nmi function. */
2631 if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
2632 error ("invalid nmi function for reset attribute");
2633 }
2634
2635 if (warm != NULL_TREE)
2636 {
2637 tree warm_func_list;
2638 tree warm_func;
2639
2640 warm_func_list = TREE_VALUE (warm);
2641 warm_func = TREE_VALUE (warm_func_list);
2642
2643 /* Issue error if it is not a valid warm function. */
2644 if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
2645 error ("invalid warm function for reset attribute");
2646 }
2647 }
2648 else
2649 {
2650 /* No interrupt, exception, or reset attribute is set. */
2651 return;
2652 }
2653 }
2654 }
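/* Hedged usage sketch of the attributes validated above (the exact user
   syntax is assumed from the attribute table, not verified elsewhere):
       void __attribute__ ((interrupt (4))) isr4 (void);   -- vector 9 + 4 = 13
       void __attribute__ ((exception (1))) exh1 (void);   -- vector 1
       void __attribute__ ((reset (16)))    boot (void);   -- 16 user interrupts
   Invalid id values are rejected by the error() calls above. */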
2655
2656 static bool
2657 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
2658 tree pop_target ATTRIBUTE_UNUSED)
2659 {
2660 /* Currently, we do not parse any target pragma ourselves,
2661 so simply return false. */
2662 return false;
2663 }
2664
2665 static void
2666 nds32_option_override (void)
2667 {
2668 /* After all the command-line options have been parsed,
2669 we shall deal with some flags for changing compiler settings. */
2670
2671 /* At first, we check if we have to strictly
2672 set some flags based on ISA family. */
2673 if (TARGET_ISA_V2)
2674 {
2675 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2676 target_flags &= ~MASK_V3PUSH;
2677 }
2678 if (TARGET_ISA_V3)
2679 {
2680 /* Under V3 ISA, currently nothing should be strictly set. */
2681 }
2682 if (TARGET_ISA_V3M)
2683 {
2684 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2685 target_flags |= MASK_REDUCED_REGS;
2686 /* Under V3M ISA, we need to strictly disable TARGET_EXT_PERF. */
2687 target_flags &= ~MASK_EXT_PERF;
2688 /* Under V3M ISA, we need to strictly disable TARGET_EXT_PERF2. */
2689 target_flags &= ~MASK_EXT_PERF2;
2690 /* Under V3M ISA, we need to strictly disable TARGET_EXT_STRING. */
2691 target_flags &= ~MASK_EXT_STRING;
2692 }
2693
2694 /* See if we are using reduced-set registers:
2695 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2696 If so, we must forbid using $r11~$r14, $r16~$r27. */
2697 if (TARGET_REDUCED_REGS)
2698 {
2699 int r;
2700
2701 /* Prevent the register allocator from
2702 choosing these registers during register allocation. */
2703 for (r = 11; r <= 14; r++)
2704 fixed_regs[r] = call_used_regs[r] = 1;
2705 for (r = 16; r <= 27; r++)
2706 fixed_regs[r] = call_used_regs[r] = 1;
2707 }
2708
2709 if (!TARGET_16_BIT)
2710 {
2711 /* Without the 16-bit ISA, we need to strictly disable TARGET_V3PUSH. */
2712 target_flags &= ~MASK_V3PUSH;
2713 }
2714
2715 /* Currently, we don't support PIC code generation yet. */
2716 if (flag_pic)
2717 sorry ("position-independent code not supported");
2718 }
2719
2720 \f
2721 /* Miscellaneous Parameters. */
2722
2723 static void
2724 nds32_init_builtins (void)
2725 {
2726 nds32_init_builtins_impl ();
2727 }
2728
2729 static rtx
2730 nds32_expand_builtin (tree exp,
2731 rtx target,
2732 rtx subtarget,
2733 machine_mode mode,
2734 int ignore)
2735 {
2736 return nds32_expand_builtin_impl (exp, target, subtarget, mode, ignore);
2737 }
2738
2739
2740 /* ------------------------------------------------------------------------ */
2741
2742 /* PART 4: Implement extern function definitions,
2743 the prototype is in nds32-protos.h. */
2744 \f
2745 /* Defining Data Structures for Per-function Information. */
2746
2747 void
2748 nds32_init_expanders (void)
2749 {
2750 /* Arrange to initialize and mark the machine per-function status. */
2751 init_machine_status = nds32_init_machine_status;
2752 }
2753
2754 \f
2755 /* Register Usage. */
2756
2757 /* -- How Values Fit in Registers. */
2758
2759 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2760
2761 static bool
2762 nds32_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2763 {
2764 /* Restrict double-word quantities to even register pairs. */
2765 if (targetm.hard_regno_nregs (regno, mode) == 1
2766 || !((regno) & 1))
2767 return true;
2768
2769 return false;
2770 }
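/* Consequently, a multi-register value such as a DImode pair may only
   start in an even-numbered register, e.g. ($r0,$r1) or ($r2,$r3),
   whereas single-register modes are allowed anywhere (illustrative). */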
2771
2772 #undef TARGET_HARD_REGNO_MODE_OK
2773 #define TARGET_HARD_REGNO_MODE_OK nds32_hard_regno_mode_ok
2774
2775 /* Implement TARGET_MODES_TIEABLE_P. We can use general registers to
2776 tie QI/HI/SI modes together. */
2777
2778 static bool
2779 nds32_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2780 {
2781 return (GET_MODE_CLASS (mode1) == MODE_INT
2782 && GET_MODE_CLASS (mode2) == MODE_INT
2783 && GET_MODE_SIZE (mode1) <= UNITS_PER_WORD
2784 && GET_MODE_SIZE (mode2) <= UNITS_PER_WORD);
2785 }
2786
2787 #undef TARGET_MODES_TIEABLE_P
2788 #define TARGET_MODES_TIEABLE_P nds32_modes_tieable_p
2789 \f
2790 /* Register Classes. */
2791
2792 enum reg_class
2793 nds32_regno_reg_class (int regno)
2794 {
2795 /* Refer to nds32.h for more register class details. */
2796
2797 if (regno >= 0 && regno <= 7)
2798 return LOW_REGS;
2799 else if (regno >= 8 && regno <= 11)
2800 return MIDDLE_REGS;
2801 else if (regno >= 12 && regno <= 14)
2802 return HIGH_REGS;
2803 else if (regno == 15)
2804 return R15_TA_REG;
2805 else if (regno >= 16 && regno <= 19)
2806 return MIDDLE_REGS;
2807 else if (regno >= 20 && regno <= 31)
2808 return HIGH_REGS;
2809 else if (regno == 32 || regno == 33)
2810 return FRAME_REGS;
2811 else
2812 return NO_REGS;
2813 }
2814
2815 \f
2816 /* Stack Layout and Calling Conventions. */
2817
2818 /* -- Basic Stack Layout. */
2819
2820 rtx
2821 nds32_return_addr_rtx (int count,
2822 rtx frameaddr ATTRIBUTE_UNUSED)
2823 {
2824 /* There is no way to determine the return address
2825 if frameaddr is the frame that has 'count' steps
2826 up from current frame. */
2827 if (count != 0)
2828 return NULL_RTX;
2829
2830 /* If count == 0, it means we are at current frame,
2831 the return address is $r30 ($lp). */
2832 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
2833 }
2834
2835 /* -- Eliminating Frame Pointer and Arg Pointer. */
2836
2837 HOST_WIDE_INT
2838 nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
2839 {
2840 HOST_WIDE_INT offset;
2841
2842 /* Compute and setup stack frame size.
2843 The result will be in cfun->machine. */
2844 nds32_compute_stack_frame ();
2845
2846 /* Remember to consider
2847 cfun->machine->callee_saved_area_gpr_padding_bytes
2848 when calculating offset. */
2849 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
2850 {
2851 offset = (cfun->machine->fp_size
2852 + cfun->machine->gp_size
2853 + cfun->machine->lp_size
2854 + cfun->machine->callee_saved_gpr_regs_size
2855 + cfun->machine->callee_saved_area_gpr_padding_bytes
2856 + cfun->machine->local_size
2857 + cfun->machine->out_args_size);
2858 }
2859 else if (from_reg == ARG_POINTER_REGNUM
2860 && to_reg == HARD_FRAME_POINTER_REGNUM)
2861 {
2862 offset = 0;
2863 }
2864 else if (from_reg == FRAME_POINTER_REGNUM
2865 && to_reg == STACK_POINTER_REGNUM)
2866 {
2867 offset = (cfun->machine->local_size + cfun->machine->out_args_size);
2868 }
2869 else if (from_reg == FRAME_POINTER_REGNUM
2870 && to_reg == HARD_FRAME_POINTER_REGNUM)
2871 {
2872 offset = (-1) * (cfun->machine->fp_size
2873 + cfun->machine->gp_size
2874 + cfun->machine->lp_size
2875 + cfun->machine->callee_saved_gpr_regs_size
2876 + cfun->machine->callee_saved_area_gpr_padding_bytes);
2877 }
2878 else
2879 {
2880 gcc_unreachable ();
2881 }
2882
2883 return offset;
2884 }
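/* Worked example of the eliminations above (the numbers are made up):
   with $fp/$gp/$lp saved (12 bytes), 8 bytes of callee-saved GPRs, no
   padding, 16 bytes of locals and 8 bytes of outgoing arguments,
       ARG_POINTER   -> STACK_POINTER        offset = 12 + 8 + 0 + 16 + 8 = 44
       FRAME_POINTER -> STACK_POINTER        offset = 16 + 8              = 24
       FRAME_POINTER -> HARD_FRAME_POINTER   offset = -(12 + 8 + 0)       = -20
   which matches the formulas computed from cfun->machine above. */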
2885
2886 /* -- Passing Arguments in Registers. */
2887
2888 void
2889 nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
2890 tree fntype ATTRIBUTE_UNUSED,
2891 rtx libname ATTRIBUTE_UNUSED,
2892 tree fndecl ATTRIBUTE_UNUSED,
2893 int n_named_args ATTRIBUTE_UNUSED)
2894 {
2895 /* Initial available registers
2896 (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
2897 for passing arguments. */
2898 cum->gpr_offset = 0;
2899 }
2900
2901 /* -- Function Entry and Exit. */
2902
2903 /* Function for normal multiple push prologue. */
2904 void
2905 nds32_expand_prologue (void)
2906 {
2907 int fp_adjust;
2908 int sp_adjust;
2909 int en4_const;
2910
2911 rtx Rb, Re;
2912 rtx fp_adjust_insn, sp_adjust_insn;
2913
2914 /* Compute and setup stack frame size.
2915 The result will be in cfun->machine. */
2916 nds32_compute_stack_frame ();
2917
2918 /* If this is a variadic function, first we need to push argument
2919 registers that hold the unnamed argument value. */
2920 if (cfun->machine->va_args_size != 0)
2921 {
2922 Rb = gen_rtx_REG (SImode, cfun->machine->va_args_first_regno);
2923 Re = gen_rtx_REG (SImode, cfun->machine->va_args_last_regno);
2924 /* No need to push $fp, $gp, or $lp, so use GEN_INT(0). */
2925 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (0), true);
2926
2927 /* We may also need to adjust the stack pointer for padding bytes,
2928 because varargs may leave $sp not 8-byte aligned. */
2929 if (cfun->machine->va_args_area_padding_bytes)
2930 {
2931 /* Generate sp adjustment instruction. */
2932 sp_adjust = cfun->machine->va_args_area_padding_bytes;
2933 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2934 stack_pointer_rtx,
2935 GEN_INT (-1 * sp_adjust));
2936
2937 /* Emit rtx into instructions list and receive INSN rtx form. */
2938 sp_adjust_insn = emit_insn (sp_adjust_insn);
2939
2940 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2941 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2942 generate CFI (Call Frame Information) stuff. */
2943 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
2944 }
2945 }
2946
2947 /* If the function is 'naked',
2948 we do not have to generate prologue code fragment. */
2949 if (cfun->machine->naked_p)
2950 return;
2951
2952 /* Get callee_first_regno and callee_last_regno. */
2953 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
2954 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
2955
2956 /* nds32_emit_stack_push_multiple(first_regno, last_regno),
2957 the pattern 'stack_push_multiple' is implemented in nds32.md.
2958 For En4 field, we have to calculate its constant value.
2959 Refer to Andes ISA for more information. */
2960 en4_const = 0;
2961 if (cfun->machine->fp_size)
2962 en4_const += 8;
2963 if (cfun->machine->gp_size)
2964 en4_const += 4;
2965 if (cfun->machine->lp_size)
2966 en4_const += 2;
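  /* For illustration: if $fp, $gp and $lp all need to be saved, then
     en4_const == 8 + 4 + 2 == 14, which is the same encoding passed as
     (const_int 14) to the v3push/v3pop variants below. */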
2967
2968 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2969 to be saved, we don't have to create multiple push instruction.
2970 Otherwise, a multiple push instruction is needed. */
2971 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
2972 {
2973 /* Create multiple push instruction rtx. */
2974 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (en4_const), false);
2975 }
2976
2977 /* Check frame_pointer_needed to see
2978 if we shall emit fp adjustment instruction. */
2979 if (frame_pointer_needed)
2980 {
2981 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2982 + (4 * callee-saved-registers)
2983 Note: No need to adjust
2984 cfun->machine->callee_saved_area_gpr_padding_bytes,
2985 because, at this point, stack pointer is just
2986 at the position after push instruction. */
2987 fp_adjust = cfun->machine->fp_size
2988 + cfun->machine->gp_size
2989 + cfun->machine->lp_size
2990 + cfun->machine->callee_saved_gpr_regs_size;
2991 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2992 stack_pointer_rtx,
2993 GEN_INT (fp_adjust));
2994 /* Emit rtx into instructions list and receive INSN rtx form. */
2995 fp_adjust_insn = emit_insn (fp_adjust_insn);
2996
2997 /* The insn rtx 'fp_adjust_insn' will change frame layout. */
2998 RTX_FRAME_RELATED_P (fp_adjust_insn) = 1;
2999 }
3000
3001 /* Adjust $sp = $sp - local_size - out_args_size
3002 - callee_saved_area_gpr_padding_bytes. */
3003 sp_adjust = cfun->machine->local_size
3004 + cfun->machine->out_args_size
3005 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3006 /* The sp_adjust value may be out of range of the addi instruction; if so,
3007 nds32_force_addi_stack_int() creates an alternative add sequence with
3008 TA_REGNUM, using a NEGATIVE value to tell it we are decreasing the address. */
3009 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
3010 if (sp_adjust)
3011 {
3012 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
3013 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3014 stack_pointer_rtx,
3015 GEN_INT (-1 * sp_adjust));
3016 /* Emit rtx into instructions list and receive INSN rtx form. */
3017 sp_adjust_insn = emit_insn (sp_adjust_insn);
3018
3019 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3020 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3021 generate CFI (Call Frame Information) stuff. */
3022 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3023 }
3024
3025 /* Prevent the instruction scheduler from
3026 moving instructions across the boundary. */
3027 emit_insn (gen_blockage ());
3028 }
3029
3030 /* Function for normal multiple pop epilogue. */
3031 void
3032 nds32_expand_epilogue (bool sibcall_p)
3033 {
3034 int sp_adjust;
3035 int en4_const;
3036
3037 rtx Rb, Re;
3038 rtx sp_adjust_insn;
3039
3040 /* Compute and setup stack frame size.
3041 The result will be in cfun->machine. */
3042 nds32_compute_stack_frame ();
3043
3044 /* Prevent the instruction scheduler from
3045 moving instructions across the boundary. */
3046 emit_insn (gen_blockage ());
3047
3048 /* If the function is 'naked', we do not have to generate any
3049 epilogue code fragment except the 'ret' instruction.
3050 However, if this function is also a variadic function,
3051 we need to adjust the stack pointer before the 'ret' instruction. */
3052 if (cfun->machine->naked_p)
3053 {
3054 /* If this is a variadic function, we do not have to restore argument
3055 registers but need to adjust stack pointer back to previous stack
3056 frame location before return. */
3057 if (cfun->machine->va_args_size != 0)
3058 {
3059 /* Generate sp adjustment instruction.
3060 We need to consider padding bytes here. */
3061 sp_adjust = cfun->machine->va_args_size
3062 + cfun->machine->va_args_area_padding_bytes;
3063 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3064 stack_pointer_rtx,
3065 GEN_INT (sp_adjust));
3066 /* Emit rtx into instructions list and receive INSN rtx form. */
3067 sp_adjust_insn = emit_insn (sp_adjust_insn);
3068
3069 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3070 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3071 generate CFI (Call Frame Information) stuff. */
3072 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3073 }
3074
3075 /* Generate return instruction by using 'return_internal' pattern.
3076 Make sure this instruction is after gen_blockage(). */
3077 if (!sibcall_p)
3078 emit_jump_insn (gen_return_internal ());
3079 return;
3080 }
3081
3082 if (frame_pointer_needed)
3083 {
3084 /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
3085 - (4 * callee-saved-registers)
3086 Note: No need to adjust
3087 cfun->machine->callee_saved_area_gpr_padding_bytes,
3088 because we want to adjust stack pointer
3089 to the position for pop instruction. */
3090 sp_adjust = cfun->machine->fp_size
3091 + cfun->machine->gp_size
3092 + cfun->machine->lp_size
3093 + cfun->machine->callee_saved_gpr_regs_size;
3094 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3095 hard_frame_pointer_rtx,
3096 GEN_INT (-1 * sp_adjust));
3097 /* Emit rtx into instructions list and receive INSN rtx form. */
3098 sp_adjust_insn = emit_insn (sp_adjust_insn);
3099
3100 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3101 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3102 }
3103 else
3104 {
3105 /* If frame pointer is NOT needed,
3106 we cannot calculate the sp adjustment from frame pointer.
3107 Instead, we calculate the adjustment by local_size,
3108 out_args_size, and callee_saved_area_padding_bytes.
3109 Notice that such sp adjustment value may be out of range,
3110 so we have to deal with it as well. */
3111
3112 /* Adjust $sp = $sp + local_size + out_args_size
3113 + callee_saved_area_padding_bytes. */
3114 sp_adjust = cfun->machine->local_size
3115 + cfun->machine->out_args_size
3116 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3117 /* The sp_adjust value may be out of range of the addi instruction; if so,
3118 nds32_force_addi_stack_int() creates an alternative add sequence with
3119 TA_REGNUM, using a POSITIVE value to tell it we are increasing the address. */
3120 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
3121 if (sp_adjust)
3122 {
3123 /* Generate sp adjustment instruction
3124 if and only if sp_adjust != 0. */
3125 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3126 stack_pointer_rtx,
3127 GEN_INT (sp_adjust));
3128 /* Emit rtx into instructions list and receive INSN rtx form. */
3129 sp_adjust_insn = emit_insn (sp_adjust_insn);
3130
3131 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3132 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3133 }
3134 }
3135
3136 /* Get callee_first_regno and callee_last_regno. */
3137 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3138 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3139
3140 /* nds32_emit_stack_pop_multiple(first_regno, last_regno),
3141 the pattern 'stack_pop_multiple' is implemented in nds32.md.
3142 For En4 field, we have to calculate its constant value.
3143 Refer to Andes ISA for more information. */
3144 en4_const = 0;
3145 if (cfun->machine->fp_size)
3146 en4_const += 8;
3147 if (cfun->machine->gp_size)
3148 en4_const += 4;
3149 if (cfun->machine->lp_size)
3150 en4_const += 2;
3151
3152 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
3153 to be saved, we don't have to create multiple pop instruction.
3154 Otherwise, a multiple pop instruction is needed. */
3155 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
3156 {
3157 /* Create multiple pop instruction rtx. */
3158 nds32_emit_stack_pop_multiple (Rb, Re, GEN_INT (en4_const));
3159 }
3160
3161 /* If this is a variadic function, we do not have to restore argument
3162 registers but need to adjust stack pointer back to previous stack
3163 frame location before return. */
3164 if (cfun->machine->va_args_size != 0)
3165 {
3166 /* Generate sp adjustment instruction.
3167 We need to consider padding bytes here. */
3168 sp_adjust = cfun->machine->va_args_size
3169 + cfun->machine->va_args_area_padding_bytes;
3170 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3171 stack_pointer_rtx,
3172 GEN_INT (sp_adjust));
3173 /* Emit rtx into instructions list and receive INSN rtx form. */
3174 sp_adjust_insn = emit_insn (sp_adjust_insn);
3175
3176 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3177 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3178 generate CFI (Call Frame Information) stuff. */
3179 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3180 }
3181
3182 /* Generate return instruction. */
3183 if (!sibcall_p)
3184 emit_jump_insn (gen_return_internal ());
3185 }
3186
3187 /* Function for v3push prologue. */
3188 void
3189 nds32_expand_prologue_v3push (void)
3190 {
3191 int fp_adjust;
3192 int sp_adjust;
3193
3194 rtx Rb, Re;
3195 rtx fp_adjust_insn, sp_adjust_insn;
3196
3197 /* Compute and setup stack frame size.
3198 The result will be in cfun->machine. */
3199 nds32_compute_stack_frame ();
3200
3201 /* If the function is 'naked',
3202 we do not have to generate prologue code fragment. */
3203 if (cfun->machine->naked_p)
3204 return;
3205
3206 /* Get callee_first_regno and callee_last_regno. */
3207 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3208 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3209
3210 /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
3211 where imm8u has to be 8-byte alignment. */
3212 sp_adjust = cfun->machine->local_size
3213 + cfun->machine->out_args_size
3214 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3215
3216 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
3217 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
3218 {
3219 /* We can use 'push25 Re,imm8u'. */
3220
3221 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3222 the pattern 'stack_v3push' is implemented in nds32.md.
3223 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3224 nds32_emit_stack_v3push (Rb, Re,
3225 GEN_INT (14), GEN_INT (sp_adjust));
3226
3227 /* Check frame_pointer_needed to see
3228 if we shall emit fp adjustment instruction. */
3229 if (frame_pointer_needed)
3230 {
3231 /* adjust $fp = $sp + 4 ($fp size)
3232 + 4 ($gp size)
3233 + 4 ($lp size)
3234 + (4 * n) (callee-saved registers)
3235 + sp_adjust ('push25 Re,imm8u')
3236 Note: Since we use 'push25 Re,imm8u',
3237 the position of stack pointer is further
3238 changed after push instruction.
3239 Hence, we need to take sp_adjust value
3240 into consideration. */
3241 fp_adjust = cfun->machine->fp_size
3242 + cfun->machine->gp_size
3243 + cfun->machine->lp_size
3244 + cfun->machine->callee_saved_gpr_regs_size
3245 + sp_adjust;
3246 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
3247 stack_pointer_rtx,
3248 GEN_INT (fp_adjust));
3249 /* Emit rtx into instructions list and receive INSN rtx form. */
3250 fp_adjust_insn = emit_insn (fp_adjust_insn);
3251 }
3252 }
3253 else
3254 {
3255 /* We have to use 'push25 Re,0' and
3256 expand one more instruction to adjust $sp later. */
3257
3258 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3259 the pattern 'stack_v3push' is implemented in nds32.md.
3260 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3261 nds32_emit_stack_v3push (Rb, Re,
3262 GEN_INT (14), GEN_INT (0));
3263
3264 /* Check frame_pointer_needed to see
3265 if we shall emit fp adjustment instruction. */
3266 if (frame_pointer_needed)
3267 {
3268 /* adjust $fp = $sp + 4 ($fp size)
3269 + 4 ($gp size)
3270 + 4 ($lp size)
3271 + (4 * n) (callee-saved registers)
3272 Note: Since we use 'push25 Re,0',
3273 the stack pointer is just at the position
3274 after push instruction.
3275 No need to take sp_adjust into consideration. */
3276 fp_adjust = cfun->machine->fp_size
3277 + cfun->machine->gp_size
3278 + cfun->machine->lp_size
3279 + cfun->machine->callee_saved_gpr_regs_size;
3280 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
3281 stack_pointer_rtx,
3282 GEN_INT (fp_adjust));
3283 /* Emit rtx into instructions list and receive INSN rtx form. */
3284 fp_adjust_insn = emit_insn (fp_adjust_insn);
3285 }
3286
3287 /* Because we use 'push25 Re,0',
3288 we need to expand one more instruction to adjust $sp.
3289 However, the sp_adjust value may be out of range of the addi instruction;
3290 if so, nds32_force_addi_stack_int() creates an alternative add sequence with
3291 TA_REGNUM, using a NEGATIVE value to tell it we are decreasing the address. */
3292 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
3293 if (sp_adjust)
3294 {
3295 /* Generate sp adjustment instruction
3296 if and only if sp_adjust != 0. */
3297 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3298 stack_pointer_rtx,
3299 GEN_INT (-1 * sp_adjust));
3300 /* Emit rtx into instructions list and receive INSN rtx form. */
3301 sp_adjust_insn = emit_insn (sp_adjust_insn);
3302
3303 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3304 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3305 generate CFI (Call Frame Information) stuff. */
3306 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3307 }
3308 }
3309
3310 /* Prevent the instruction scheduler from
3311 moving instructions across the boundary. */
3312 emit_insn (gen_blockage ());
3313 }
3314
3315 /* Function for v3pop epilogue. */
3316 void
3317 nds32_expand_epilogue_v3pop (bool sibcall_p)
3318 {
3319 int sp_adjust;
3320
3321 rtx Rb, Re;
3322 rtx sp_adjust_insn;
3323
3324 /* Compute and setup stack frame size.
3325 The result will be in cfun->machine. */
3326 nds32_compute_stack_frame ();
3327
3328 /* Prevent the instruction scheduler from
3329 moving instructions across the boundary. */
3330 emit_insn (gen_blockage ());
3331
3332 /* If the function is 'naked', we do not have to generate any
3333 epilogue code fragment except the 'ret' instruction. */
3334 if (cfun->machine->naked_p)
3335 {
3336 /* Generate return instruction by using 'return_internal' pattern.
3337 Make sure this instruction is after gen_blockage(). */
3338 if (!sibcall_p)
3339 emit_jump_insn (gen_return_internal ());
3340 return;
3341 }
3342
3343 /* Get callee_first_regno and callee_last_regno. */
3344 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3345 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3346
3347 /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
3348 where imm8u has to be 8-byte alignment. */
3349 sp_adjust = cfun->machine->local_size
3350 + cfun->machine->out_args_size
3351 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3352
3353 /* We have to consider the alloca issue as well.
3354 If the function does call alloca(), the stack pointer is not fixed.
3355 In that case, we cannot use 'pop25 Re,imm8u' directly.
3356 We have to calculate the stack pointer from the frame pointer
3357 and then use 'pop25 Re,0'.
3358 Of course, the frame_pointer_needed should be nonzero
3359 if the function calls alloca(). */
3360 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
3361 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
3362 && !cfun->calls_alloca)
3363 {
3364 /* We can use 'pop25 Re,imm8u'. */
3365
3366 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3367 the pattern 'stack_v3pop' is implemented in nds32.md.
3368 The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3369 nds32_emit_stack_v3pop (Rb, Re,
3370 GEN_INT (14), GEN_INT (sp_adjust));
3371 }
3372 else
3373 {
3374 /* We have to use 'pop25 Re,0', and prior to it,
3375 we must expand one more instruction to adjust $sp. */
3376
3377 if (frame_pointer_needed)
3378 {
3379 /* adjust $sp = $fp - 4 ($fp size)
3380 - 4 ($gp size)
3381 - 4 ($lp size)
3382 - (4 * n) (callee-saved registers)
3383 Note: No need to adjust
3384 cfun->machine->callee_saved_area_gpr_padding_bytes,
3385 because we want to adjust stack pointer
3386 to the position for pop instruction. */
3387 sp_adjust = cfun->machine->fp_size
3388 + cfun->machine->gp_size
3389 + cfun->machine->lp_size
3390 + cfun->machine->callee_saved_gpr_regs_size;
3391 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3392 hard_frame_pointer_rtx,
3393 GEN_INT (-1 * sp_adjust));
3394 /* Emit rtx into instructions list and receive INSN rtx form. */
3395 sp_adjust_insn = emit_insn (sp_adjust_insn);
3396 }
3397 else
3398 {
3399 /* If frame pointer is NOT needed,
3400 we cannot calculate the sp adjustment from frame pointer.
3401 Instead, we calculate the adjustment by local_size,
3402 out_args_size, and callee_saved_area_padding_bytes.
3403 Notice that such sp adjustment value may be out of range,
3404 so we have to deal with it as well. */
3405
3406 /* Adjust $sp = $sp + local_size + out_args_size
3407 + callee_saved_area_gpr_padding_bytes. */
3408 sp_adjust = cfun->machine->local_size
3409 + cfun->machine->out_args_size
3410 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3411 /* The sp_adjust value may be out of range of the addi instruction; if so,
3412 nds32_force_addi_stack_int() creates an alternative add sequence with
3413 TA_REGNUM, using a POSITIVE value to tell it we are increasing the address. */
3414 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
3415 if (sp_adjust)
3416 {
3417 /* Generate sp adjustment instruction
3418 if and only if sp_adjust != 0. */
3419 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3420 stack_pointer_rtx,
3421 GEN_INT (sp_adjust));
3422 /* Emit rtx into instructions list and receive INSN rtx form. */
3423 sp_adjust_insn = emit_insn (sp_adjust_insn);
3424 }
3425 }
3426
3427 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3428 the pattern 'stack_v3pop' is implemented in nds32.md. */
3429 /* The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3430 nds32_emit_stack_v3pop (Rb, Re,
3431 GEN_INT (14), GEN_INT (0));
3432 }
3433
3434 /* Generate return instruction. */
3435 emit_jump_insn (gen_pop25return ());
3436 }
3437
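/* Illustrative sketch (added for exposition; not part of the original port,
   and the helper names are hypothetical): the two stack adjustments used by
   the v3pop epilogue above, restated with plain integers.  The 0..255 bound
   stands in for the Iu08 constraint (an unsigned value fitting in 8 bits)
   and the modulo-8 test for NDS32_DOUBLE_WORD_ALIGN_P.  */
static inline bool
nds32_sketch_v3pop_imm8u_ok (int local_size, int out_args_size,
                             int gpr_padding_bytes, bool calls_alloca)
{
  int sp_adjust = local_size + out_args_size + gpr_padding_bytes;

  /* 'pop25 Re,imm8u' needs a small, double-word-aligned, fixed adjustment,
     which is impossible once alloca() has moved the stack pointer.  */
  return sp_adjust >= 0 && sp_adjust <= 255
         && (sp_adjust % 8) == 0
         && !calls_alloca;
}

/* When that fast path is unavailable and a frame pointer exists, the
   epilogue instead rewinds $sp from $fp by the fixed save area:
   $fp, $gp, $lp, plus the callee-saved GPRs.  */
static inline int
nds32_sketch_fp_based_sp_adjust (int fp_size, int gp_size, int lp_size,
                                 int callee_saved_gpr_regs_size)
{
  return fp_size + gp_size + lp_size + callee_saved_gpr_regs_size;
}
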
3438 /* Return nonzero if this function is known to have a null epilogue.
3439 This allows the optimizer to omit jumps to jumps if no stack
3440 was created. */
3441 int
3442 nds32_can_use_return_insn (void)
3443 {
3444 /* Prior to reloading, we can't tell how many registers must be saved.
3445 Thus we cannot determine whether this function has a null epilogue. */
3446 if (!reload_completed)
3447 return 0;
3448
3449 /* If no stack was created, two conditions must be satisfied:
3450 1. This is a naked function.
3451 So there is no callee-saved area, local size, or outgoing args size.
3452 2. This is NOT a variadic function.
3453 So no argument registers are pushed onto the stack. */
3454 return (cfun->machine->naked_p && (cfun->machine->va_args_size == 0));
3455 }
3456
3457 /* ------------------------------------------------------------------------ */
3458
3459 /* Function to test 333-form for load/store instructions.
3460 This is an auxiliary extern function for an auxiliary macro in nds32.h.
3461 Because it is a little complicated, we use a function instead of a macro. */
3462 bool
3463 nds32_ls_333_p (rtx rt, rtx ra, rtx imm, machine_mode mode)
3464 {
3465 if (REGNO_REG_CLASS (REGNO (rt)) == LOW_REGS
3466 && REGNO_REG_CLASS (REGNO (ra)) == LOW_REGS)
3467 {
3468 if (GET_MODE_SIZE (mode) == 4)
3469 return satisfies_constraint_Iu05 (imm);
3470
3471 if (GET_MODE_SIZE (mode) == 2)
3472 return satisfies_constraint_Iu04 (imm);
3473
3474 if (GET_MODE_SIZE (mode) == 1)
3475 return satisfies_constraint_Iu03 (imm);
3476 }
3477
3478 return false;
3479 }
3480
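/* Illustrative usage sketch (added for exposition; not part of the original
   port, and the helper name is hypothetical): probing the 333-form test
   with a word-sized access.  $r0 and $r1 are assumed to be LOW_REGS, and 4
   is a small unsigned offset whose acceptance depends on the Iu05
   constraint checked above.  */
static inline bool
nds32_sketch_probe_ls_333 (void)
{
  rtx rt = gen_rtx_REG (SImode, 0);   /* hypothetical destination register */
  rtx ra = gen_rtx_REG (SImode, 1);   /* hypothetical base register */
  rtx imm = GEN_INT (4);              /* small unsigned offset */

  return nds32_ls_333_p (rt, ra, imm, SImode);
}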
3481
3482 /* Computing the Length of an Insn.
3483 Adjust the length assigned to instruction INSN.
3484 LENGTH is the initially computed length of the insn. */
3485 int
3486 nds32_adjust_insn_length (rtx_insn *insn, int length)
3487 {
3488 rtx src, dst;
3489
3490 switch (recog_memoized (insn))
3491 {
3492 case CODE_FOR_move_df:
3493 case CODE_FOR_move_di:
3494 /* Shrink the length to 2 bytes when the movd44 form can be used. */
3495 src = XEXP (PATTERN (insn), 1);
3496 dst = XEXP (PATTERN (insn), 0);
3497
3498 if (REG_P (src)
3499 && REG_P (dst)
3500 && (REGNO (src) % 2) == 0
3501 && (REGNO (dst) % 2) == 0)
3502 length = 2;
3503 break;
3504
3505 default:
3506 break;
3507 }
3508
3509 return length;
3510 }
3511
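/* Illustrative sketch (added for exposition; not part of the original port,
   and the helper name is hypothetical): the register-pair condition behind
   the length adjustment above, restated with plain register numbers.  */
static inline bool
nds32_sketch_movd44_candidate_p (unsigned int src_regno, unsigned int dst_regno)
{
  /* Both double-word values must start on an even-numbered register so the
     short 2-byte movd44 encoding can name each pair, e.g. $r0/$r1 -> $r2/$r3.  */
  return (src_regno % 2) == 0 && (dst_regno % 2) == 0;
}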
3512
3513 /* Return alignment 2 (log base 2) if the length of the insn after LABEL is a multiple of 4 bytes. */
3514 int
3515 nds32_target_alignment (rtx_insn *label)
3516 {
3517 rtx_insn *insn;
3518
3519 if (optimize_size)
3520 return 0;
3521
3522 insn = next_active_insn (label);
3523
3524 if (insn == 0)
3525 return 0;
3526 else if ((get_attr_length (insn) % 4) == 0)
3527 return 2;
3528 else
3529 return 0;
3530 }
3531
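/* Illustrative sketch (added for exposition; not part of the original port,
   and the helper name is hypothetical): the value returned above is a log-2
   alignment, so a return of 2 requests 1 << 2 = 4-byte label alignment and
   a return of 0 requests none.  */
static inline int
nds32_sketch_alignment_in_bytes (int log2_align)
{
  return 1 << log2_align;   /* 2 -> 4 bytes, 0 -> 1 byte (no padding).  */
}
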
3532 /* ------------------------------------------------------------------------ */
3533
3534 /* PART 5: Initialize target hook structure and definitions. */
3535 \f
3536 /* Controlling the Compilation Driver. */
3537
3538 \f
3539 /* Run-time Target Specification. */
3540
3541 \f
3542 /* Defining Data Structures for Per-function Information. */
3543
3544 \f
3545 /* Storage Layout. */
3546
3547 #undef TARGET_PROMOTE_FUNCTION_MODE
3548 #define TARGET_PROMOTE_FUNCTION_MODE \
3549 default_promote_function_mode_always_promote
3550
3551 \f
3552 /* Layout of Source Language Data Types. */
3553
3554 \f
3555 /* Register Usage. */
3556
3557 /* -- Basic Characteristics of Registers. */
3558
3559 /* -- Order of Allocation of Registers. */
3560
3561 /* -- How Values Fit in Registers. */
3562
3563 /* -- Handling Leaf Functions. */
3564
3565 /* -- Registers That Form a Stack. */
3566
3567 \f
3568 /* Register Classes. */
3569
3570 #undef TARGET_CLASS_MAX_NREGS
3571 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
3572
3573 #undef TARGET_REGISTER_PRIORITY
3574 #define TARGET_REGISTER_PRIORITY nds32_register_priority
3575
3576 \f
3577 /* Obsolete Macros for Defining Constraints. */
3578
3579 \f
3580 /* Stack Layout and Calling Conventions. */
3581
3582 /* -- Basic Stack Layout. */
3583
3584 /* -- Exception Handling Support. */
3585
3586 /* -- Specifying How Stack Checking is Done. */
3587
3588 /* -- Registers That Address the Stack Frame. */
3589
3590 /* -- Eliminating Frame Pointer and Arg Pointer. */
3591
3592 #undef TARGET_CAN_ELIMINATE
3593 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
3594
3595 /* -- Passing Function Arguments on the Stack. */
3596
3597 /* -- Passing Arguments in Registers. */
3598
3599 #undef TARGET_FUNCTION_ARG
3600 #define TARGET_FUNCTION_ARG nds32_function_arg
3601
3602 #undef TARGET_MUST_PASS_IN_STACK
3603 #define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack
3604
3605 #undef TARGET_ARG_PARTIAL_BYTES
3606 #define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes
3607
3608 #undef TARGET_FUNCTION_ARG_ADVANCE
3609 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
3610
3611 #undef TARGET_FUNCTION_ARG_BOUNDARY
3612 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
3613
3614 /* -- How Scalar Function Values Are Returned. */
3615
3616 #undef TARGET_FUNCTION_VALUE
3617 #define TARGET_FUNCTION_VALUE nds32_function_value
3618
3619 #undef TARGET_LIBCALL_VALUE
3620 #define TARGET_LIBCALL_VALUE nds32_libcall_value
3621
3622 #undef TARGET_FUNCTION_VALUE_REGNO_P
3623 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
3624
3625 /* -- How Large Values Are Returned. */
3626
3627 /* -- Caller-Saves Register Allocation. */
3628
3629 /* -- Function Entry and Exit. */
3630
3631 #undef TARGET_ASM_FUNCTION_PROLOGUE
3632 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
3633
3634 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
3635 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
3636
3637 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
3638 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
3639
3640 #undef TARGET_ASM_FUNCTION_EPILOGUE
3641 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
3642
3643 #undef TARGET_ASM_OUTPUT_MI_THUNK
3644 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
3645
3646 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3647 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
3648
3649 /* -- Generating Code for Profiling. */
3650
3651 /* -- Permitting tail calls. */
3652
3653 #undef TARGET_WARN_FUNC_RETURN
3654 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
3655
3656 /* Stack smashing protection. */
3657
3658 \f
3659 /* Implementing the Varargs Macros. */
3660
3661 #undef TARGET_SETUP_INCOMING_VARARGS
3662 #define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs
3663
3664 #undef TARGET_STRICT_ARGUMENT_NAMING
3665 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
3666
3667 \f
3668 /* Trampolines for Nested Functions. */
3669
3670 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
3671 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
3672
3673 #undef TARGET_TRAMPOLINE_INIT
3674 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
3675
3676 \f
3677 /* Implicit Calls to Library Routines. */
3678
3679 \f
3680 /* Addressing Modes. */
3681
3682 #undef TARGET_LEGITIMATE_ADDRESS_P
3683 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
3684
3685 \f
3686 /* Anchored Addresses. */
3687
3688 \f
3689 /* Condition Code Status. */
3690
3691 /* -- Representation of condition codes using (cc0). */
3692
3693 /* -- Representation of condition codes using registers. */
3694
3695 /* -- Macros to control conditional execution. */
3696
3697 \f
3698 /* Describing Relative Costs of Operations. */
3699
3700 #undef TARGET_REGISTER_MOVE_COST
3701 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
3702
3703 #undef TARGET_MEMORY_MOVE_COST
3704 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
3705
3706 #undef TARGET_RTX_COSTS
3707 #define TARGET_RTX_COSTS nds32_rtx_costs
3708
3709 #undef TARGET_ADDRESS_COST
3710 #define TARGET_ADDRESS_COST nds32_address_cost
3711
3712 \f
3713 /* Adjusting the Instruction Scheduler. */
3714
3715 \f
3716 /* Dividing the Output into Sections (Texts, Data, . . . ). */
3717
3718 #undef TARGET_ENCODE_SECTION_INFO
3719 #define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info
3720
3721 \f
3722 /* Position Independent Code. */
3723
3724 \f
3725 /* Defining the Output Assembler Language. */
3726
3727 /* -- The Overall Framework of an Assembler File. */
3728
3729 #undef TARGET_ASM_FILE_START
3730 #define TARGET_ASM_FILE_START nds32_asm_file_start
3731 #undef TARGET_ASM_FILE_END
3732 #define TARGET_ASM_FILE_END nds32_asm_file_end
3733
3734 /* -- Output of Data. */
3735
3736 #undef TARGET_ASM_ALIGNED_HI_OP
3737 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3738
3739 #undef TARGET_ASM_ALIGNED_SI_OP
3740 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
3741
3742 /* -- Output of Uninitialized Variables. */
3743
3744 /* -- Output and Generation of Labels. */
3745
3746 #undef TARGET_ASM_GLOBALIZE_LABEL
3747 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
3748
3749 /* -- How Initialization Functions Are Handled. */
3750
3751 /* -- Macros Controlling Initialization Routines. */
3752
3753 /* -- Output of Assembler Instructions. */
3754
3755 #undef TARGET_PRINT_OPERAND
3756 #define TARGET_PRINT_OPERAND nds32_print_operand
3757 #undef TARGET_PRINT_OPERAND_ADDRESS
3758 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
3759
3760 /* -- Output of Dispatch Tables. */
3761
3762 /* -- Assembler Commands for Exception Regions. */
3763
3764 /* -- Assembler Commands for Alignment. */
3765
3766 \f
3767 /* Controlling Debugging Information Format. */
3768
3769 /* -- Macros Affecting All Debugging Formats. */
3770
3771 /* -- Specific Options for DBX Output. */
3772
3773 /* -- Open-Ended Hooks for DBX Format. */
3774
3775 /* -- File Names in DBX Format. */
3776
3777 /* -- Macros for DWARF Output. */
3778
3779 /* -- Macros for VMS Debug Format. */
3780
3781 \f
3782 /* Cross Compilation and Floating Point. */
3783
3784 \f
3785 /* Mode Switching Instructions. */
3786
3787 \f
3788 /* Defining target-specific uses of __attribute__. */
3789
3790 #undef TARGET_ATTRIBUTE_TABLE
3791 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
3792
3793 #undef TARGET_MERGE_DECL_ATTRIBUTES
3794 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
3795
3796 #undef TARGET_INSERT_ATTRIBUTES
3797 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
3798
3799 #undef TARGET_OPTION_PRAGMA_PARSE
3800 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
3801
3802 #undef TARGET_OPTION_OVERRIDE
3803 #define TARGET_OPTION_OVERRIDE nds32_option_override
3804
3805 \f
3806 /* Emulating TLS. */
3807
3808 \f
3809 /* Defining coprocessor specifics for MIPS targets. */
3810
3811 \f
3812 /* Parameters for Precompiled Header Validity Checking. */
3813
3814 \f
3815 /* C++ ABI parameters. */
3816
3817 \f
3818 /* Adding support for named address spaces. */
3819
3820 \f
3821 /* Miscellaneous Parameters. */
3822
3823 #undef TARGET_INIT_BUILTINS
3824 #define TARGET_INIT_BUILTINS nds32_init_builtins
3825
3826 #undef TARGET_EXPAND_BUILTIN
3827 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
3828
3829 \f
3830 /* ------------------------------------------------------------------------ */
3831
3832 /* Initialize the GCC target structure. */
3833
3834 struct gcc_target targetm = TARGET_INITIALIZER;
3835
3836 /* ------------------------------------------------------------------------ */