1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* ------------------------------------------------------------------------ */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "df.h"
31 #include "tm_p.h"
32 #include "optabs.h" /* For GEN_FCN. */
33 #include "regs.h"
34 #include "emit-rtl.h"
35 #include "recog.h"
36 #include "diagnostic-core.h"
37 #include "stor-layout.h"
38 #include "varasm.h"
39 #include "calls.h"
40 #include "output.h"
41 #include "explow.h"
42 #include "expr.h"
43 #include "tm-constrs.h"
44 #include "builtins.h"
45
46 /* This file should be included last. */
47 #include "target-def.h"
48
49 /* ------------------------------------------------------------------------ */
50
51 /* This file is divided into five parts:
52
53 PART 1: Auxiliary static variable definitions and
54 target hook static variable definitions.
55
56 PART 2: Auxiliary static function definitions.
57
58 PART 3: Implement target hook stuff definitions.
59
60 PART 4: Implement extern function definitions,
61 the prototypes are in nds32-protos.h.
62
63 PART 5: Initialize target hook structure and definitions. */
64
65 /* ------------------------------------------------------------------------ */
66
67 /* PART 1: Auxiliary static variable definitions and
68 target hook static variable definitions. */
69
70 /* Define intrinsic register names.
71 Please refer to the nds32_intrinsic.h file; the index corresponds to
72 the 'enum nds32_intrinsic_registers' data type values.
73 NOTE that the base value starts from 1024. */
74 static const char * const nds32_intrinsic_register_names[] =
75 {
76 "$PSW", "$IPSW", "$ITYPE", "$IPC"
77 };
78
79 /* Defining target-specific uses of __attribute__. */
80 static const struct attribute_spec nds32_attribute_table[] =
81 {
82 /* Syntax: { name, min_len, max_len, decl_required, type_required,
83 function_type_required, handler, affects_type_identity } */
84
85 /* The interrupt vid: [0-63]+ (actual vector number ranges from 9 to 72). */
86 { "interrupt", 1, 64, false, false, false, NULL, false },
87 /* The exception vid: [1-8]+ (actual vector number ranges from 1 to 8). */
88 { "exception", 1, 8, false, false, false, NULL, false },
89 /* The argument is the user's interrupt numbers. The vector number is always 0. */
90 { "reset", 1, 1, false, false, false, NULL, false },
91
92 /* The attributes describing isr nested type. */
93 { "nested", 0, 0, false, false, false, NULL, false },
94 { "not_nested", 0, 0, false, false, false, NULL, false },
95 { "nested_ready", 0, 0, false, false, false, NULL, false },
96
97 /* The attributes describing isr register save scheme. */
98 { "save_all", 0, 0, false, false, false, NULL, false },
99 { "partial_save", 0, 0, false, false, false, NULL, false },
100
101 /* The attributes used by reset attribute. */
102 { "nmi", 1, 1, false, false, false, NULL, false },
103 { "warm", 1, 1, false, false, false, NULL, false },
104
105 /* The attribute telling no prologue/epilogue. */
106 { "naked", 0, 0, false, false, false, NULL, false },
107
108 /* The last attribute spec is set to be NULL. */
109 { NULL, 0, 0, false, false, false, NULL, false }
110 };
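/* As an editorial illustration only (hypothetical user code, not part of
   this file), the attributes above are meant to be attached to function
   declarations in user programs, for example:

     void __attribute__ ((interrupt(3), nested))   my_isr (void);
     void __attribute__ ((exception(1), save_all)) my_exception (void);
     void __attribute__ ((naked))                  raw_entry (void);

   The handler fields in the table are NULL, so checking and code generation
   for these attributes happen elsewhere, e.g. when the isr vector
   information is constructed.  */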
111
112
113 /* ------------------------------------------------------------------------ */
114
115 /* PART 2: Auxiliary static function definitions. */
116
117 /* Function to save and restore machine-specific function data. */
118 static struct machine_function *
119 nds32_init_machine_status (void)
120 {
121 struct machine_function *machine;
122 machine = ggc_cleared_alloc<machine_function> ();
123
124 /* Initially assume this function needs prologue/epilogue. */
125 machine->naked_p = 0;
126
127 /* Initially assume this function does NOT use fp_as_gp optimization. */
128 machine->fp_as_gp_p = 0;
129
130 return machine;
131 }
132
133 /* Function to compute stack frame size and
134 store it into the cfun->machine structure. */
135 static void
136 nds32_compute_stack_frame (void)
137 {
138 int r;
139 int block_size;
140
141 /* Because nds32_compute_stack_frame() may be called from different places,
142 every time we enter this function we have to assume that this function
143 needs a prologue/epilogue. */
144 cfun->machine->naked_p = 0;
145
146 /* Get the variadic arguments size to prepare pretend arguments;
147 we will push them onto the stack in the prologue ourselves. */
148 cfun->machine->va_args_size = crtl->args.pretend_args_size;
149 if (cfun->machine->va_args_size != 0)
150 {
151 cfun->machine->va_args_first_regno
152 = NDS32_GPR_ARG_FIRST_REGNUM
153 + NDS32_MAX_GPR_REGS_FOR_ARGS
154 - (crtl->args.pretend_args_size / UNITS_PER_WORD);
155 cfun->machine->va_args_last_regno
156 = NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS - 1;
157 }
158 else
159 {
160 cfun->machine->va_args_first_regno = SP_REGNUM;
161 cfun->machine->va_args_last_regno = SP_REGNUM;
162 }
163
164 /* Important: We need to make sure that the varargs area is 8-byte aligned. */
165 block_size = cfun->machine->va_args_size;
166 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
167 {
168 cfun->machine->va_args_area_padding_bytes
169 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
170 }
171
172 /* Get the size of local variables, incoming variables, and temporary variables.
173 Note that we need to make sure it is 8-byte aligned because
174 there may be no padding bytes if we are using LRA. */
175 cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
176
177 /* Get outgoing arguments size. */
178 cfun->machine->out_args_size = crtl->outgoing_args_size;
179
180 /* If $fp value is required to be saved on stack, it needs 4 bytes space.
181 Check whether $fp is ever live. */
182 cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;
183
184 /* If $gp value is required to be saved on stack, it needs 4 bytes space.
185 Check whether we are using PIC code generation. */
186 cfun->machine->gp_size = (flag_pic) ? 4 : 0;
187
188 /* If $lp value is required to be saved on stack, it needs 4 bytes space.
189 Check whether $lp is ever live. */
190 cfun->machine->lp_size = (df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;
191
192 /* Initially there is no padding bytes. */
193 cfun->machine->callee_saved_area_gpr_padding_bytes = 0;
194
195 /* Calculate the bytes of saving callee-saved registers on stack. */
196 cfun->machine->callee_saved_gpr_regs_size = 0;
197 cfun->machine->callee_saved_first_gpr_regno = SP_REGNUM;
198 cfun->machine->callee_saved_last_gpr_regno = SP_REGNUM;
199 /* Currently, there is no need to check $r28~$r31
200 because we will save them in another way. */
201 for (r = 0; r < 28; r++)
202 {
203 if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
204 {
205 /* Mark the first required callee-saved register
206 (only need to set it once).
207 If first regno == SP_REGNUM, we can tell that
208 it is the first time to be here. */
209 if (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM)
210 cfun->machine->callee_saved_first_gpr_regno = r;
211 /* Mark the last required callee-saved register. */
212 cfun->machine->callee_saved_last_gpr_regno = r;
213 }
214 }
215
216 /* Check if this function can omit prologue/epilogue code fragment.
217 If there is 'naked' attribute in this function,
218 we can set 'naked_p' flag to indicate that
219 we do not have to generate prologue/epilogue.
220 Or, if all the following conditions succeed,
221 we can set this function 'naked_p' as well:
222 condition 1: first_regno == last_regno == SP_REGNUM,
223 which means we do not have to save
224 any callee-saved registers.
225 condition 2: Both $lp and $fp are NOT live in this function,
226 which means we do not need to save them and there
227 is no outgoing size.
228 condition 3: There is no local_size, which means
229 we do not need to adjust $sp. */
230 if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
231 || (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM
232 && cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM
233 && !df_regs_ever_live_p (FP_REGNUM)
234 && !df_regs_ever_live_p (LP_REGNUM)
235 && cfun->machine->local_size == 0))
236 {
237 /* Set this function 'naked_p' and other functions can check this flag.
238 Note that in the nds32 port, 'naked_p = 1' JUST means there is no
239 callee-saved area, local size, or outgoing size.
240 The varargs space and ret instruction may still be present in
241 the prologue/epilogue expansion. */
242 cfun->machine->naked_p = 1;
243
244 /* No need to save $fp, $gp, and $lp.
245 We should set these values to zero
246 so that nds32_initial_elimination_offset() can work properly. */
247 cfun->machine->fp_size = 0;
248 cfun->machine->gp_size = 0;
249 cfun->machine->lp_size = 0;
250
251 /* If stack usage computation is required,
252 we need to provide the static stack size. */
253 if (flag_stack_usage_info)
254 current_function_static_stack_size = 0;
255
256 /* No need to do following adjustment, return immediately. */
257 return;
258 }
259
260 /* Adjustment for v3push instructions:
261 If we are using v3push (push25/pop25) instructions,
262 we need to make sure Rb is $r6 and Re is
263 one of $r6, $r8, $r10, or $r14.
264 Some results above will be discarded and recomputed.
265 Note that this is only available under the V3/V3M ISA and we
266 DO NOT set up the following for isr or variadic functions. */
267 if (TARGET_V3PUSH
268 && !nds32_isr_function_p (current_function_decl)
269 && (cfun->machine->va_args_size == 0))
270 {
271 /* Recompute:
272 cfun->machine->fp_size
273 cfun->machine->gp_size
274 cfun->machine->lp_size
275 cfun->machine->callee_saved_first_gpr_regno
276 cfun->machine->callee_saved_last_gpr_regno */
277
278 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
279 cfun->machine->fp_size = 4;
280 cfun->machine->gp_size = 4;
281 cfun->machine->lp_size = 4;
282
283 /* Remember to set Rb = $r6. */
284 cfun->machine->callee_saved_first_gpr_regno = 6;
285
286 if (cfun->machine->callee_saved_last_gpr_regno <= 6)
287 {
288 /* Re = $r6 */
289 cfun->machine->callee_saved_last_gpr_regno = 6;
290 }
291 else if (cfun->machine->callee_saved_last_gpr_regno <= 8)
292 {
293 /* Re = $r8 */
294 cfun->machine->callee_saved_last_gpr_regno = 8;
295 }
296 else if (cfun->machine->callee_saved_last_gpr_regno <= 10)
297 {
298 /* Re = $r10 */
299 cfun->machine->callee_saved_last_gpr_regno = 10;
300 }
301 else if (cfun->machine->callee_saved_last_gpr_regno <= 14)
302 {
303 /* Re = $r14 */
304 cfun->machine->callee_saved_last_gpr_regno = 14;
305 }
306 else if (cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM)
307 {
308 /* If last_regno is SP_REGNUM, it means
309 it was never changed, so set Re = $r6. */
310 cfun->machine->callee_saved_last_gpr_regno = 6;
311 }
312 else
313 {
314 /* The program flow should not go here. */
315 gcc_unreachable ();
316 }
317 }
318
319 /* We have correctly set callee_saved_first_gpr_regno
320 and callee_saved_last_gpr_regno.
321 Initially, callee_saved_gpr_regs_size is supposed to be 0.
322 As long as callee_saved_last_gpr_regno is not SP_REGNUM,
323 we can update callee_saved_gpr_regs_size with the new size. */
324 if (cfun->machine->callee_saved_last_gpr_regno != SP_REGNUM)
325 {
326 /* Compute pushed size of callee-saved registers. */
327 cfun->machine->callee_saved_gpr_regs_size
328 = 4 * (cfun->machine->callee_saved_last_gpr_regno
329 - cfun->machine->callee_saved_first_gpr_regno
330 + 1);
331 }
332
333 /* Important: We need to make sure that
334 (fp_size + gp_size + lp_size + callee_saved_regs_size)
335 is 8-byte aligned.
336 If it is not, calculate the padding bytes. */
337 block_size = cfun->machine->fp_size
338 + cfun->machine->gp_size
339 + cfun->machine->lp_size
340 + cfun->machine->callee_saved_gpr_regs_size;
341 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
342 {
343 cfun->machine->callee_saved_area_gpr_padding_bytes
344 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
345 }
346
347 /* If stack usage computation is required,
348 we need to provide the static stack size. */
349 if (flag_stack_usage_info)
350 {
351 current_function_static_stack_size
352 = NDS32_ROUND_UP_DOUBLE_WORD (block_size)
353 + cfun->machine->local_size
354 + cfun->machine->out_args_size;
355 }
356 }
357
358 /* Function to create a parallel rtx pattern
359 which represents stack push multiple behavior.
360 The overall concept is:
361 "push registers to memory",
362 "adjust stack pointer". */
363 static void
364 nds32_emit_stack_push_multiple (rtx Rb, rtx Re, rtx En4, bool vaarg_p)
365 {
366 int regno;
367 int extra_count;
368 int num_use_regs;
369 int par_index;
370 int offset;
371 int save_fp, save_gp, save_lp;
372
373 rtx reg;
374 rtx mem;
375 rtx push_rtx;
376 rtx adjust_sp_rtx;
377 rtx parallel_insn;
378 rtx dwarf;
379
380 /* We need to provide a customized rtx which contains
381 necessary information for data analysis,
382 so we create a parallel rtx like this:
383 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
384 (reg:SI Rb))
385 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
386 (reg:SI Rb+1))
387 ...
388 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
389 (reg:SI Re))
390 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
391 (reg:SI FP_REGNUM))
392 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
393 (reg:SI GP_REGNUM))
394 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
395 (reg:SI LP_REGNUM))
396 (set (reg:SI SP_REGNUM)
397 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
398
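  /* Editorial note: judging from the mask tests below, En4 is a 4-bit
     enable mask in which bit 3 selects $fp, bit 2 selects $gp, and
     bit 1 selects $lp; bit 0 is not examined here.  */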
399 /* Determine whether we need to save $fp, $gp, or $lp. */
400 save_fp = INTVAL (En4) & 0x8;
401 save_gp = INTVAL (En4) & 0x4;
402 save_lp = INTVAL (En4) & 0x2;
403
404 /* Calculate the number of registers that will be pushed. */
405 extra_count = 0;
406 if (save_fp)
407 extra_count++;
408 if (save_gp)
409 extra_count++;
410 if (save_lp)
411 extra_count++;
412 /* Note that Rb and Re may be SP_REGNUM. DO NOT count them in. */
413 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
414 num_use_regs = extra_count;
415 else
416 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
417
418 /* In addition to used registers,
419 we need one more space for (set sp sp-x) rtx. */
420 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
421 rtvec_alloc (num_use_regs + 1));
422 par_index = 0;
423
424 /* Initialize offset and start to create push behavior. */
425 offset = -(num_use_regs * 4);
426
427 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
428 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
429 {
430 /* Rb and Re may be SP_REGNUM.
431 We need to break this loop immediately. */
432 if (regno == SP_REGNUM)
433 break;
434
435 reg = gen_rtx_REG (SImode, regno);
436 mem = gen_frame_mem (SImode, plus_constant (Pmode,
437 stack_pointer_rtx,
438 offset));
439 push_rtx = gen_rtx_SET (mem, reg);
440 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
441 RTX_FRAME_RELATED_P (push_rtx) = 1;
442 offset = offset + 4;
443 par_index++;
444 }
445
446 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
447 if (save_fp)
448 {
449 reg = gen_rtx_REG (SImode, FP_REGNUM);
450 mem = gen_frame_mem (SImode, plus_constant (Pmode,
451 stack_pointer_rtx,
452 offset));
453 push_rtx = gen_rtx_SET (mem, reg);
454 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
455 RTX_FRAME_RELATED_P (push_rtx) = 1;
456 offset = offset + 4;
457 par_index++;
458 }
459 if (save_gp)
460 {
461 reg = gen_rtx_REG (SImode, GP_REGNUM);
462 mem = gen_frame_mem (SImode, plus_constant (Pmode,
463 stack_pointer_rtx,
464 offset));
465 push_rtx = gen_rtx_SET (mem, reg);
466 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
467 RTX_FRAME_RELATED_P (push_rtx) = 1;
468 offset = offset + 4;
469 par_index++;
470 }
471 if (save_lp)
472 {
473 reg = gen_rtx_REG (SImode, LP_REGNUM);
474 mem = gen_frame_mem (SImode, plus_constant (Pmode,
475 stack_pointer_rtx,
476 offset));
477 push_rtx = gen_rtx_SET (mem, reg);
478 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
479 RTX_FRAME_RELATED_P (push_rtx) = 1;
480 offset = offset + 4;
481 par_index++;
482 }
483
484 /* Create (set sp sp-x). */
485
486 /* We need to re-calculate the offset value again for adjustment. */
487 offset = -(num_use_regs * 4);
488 adjust_sp_rtx
489 = gen_rtx_SET (stack_pointer_rtx,
490 plus_constant (Pmode, stack_pointer_rtx, offset));
491 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
492 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
493
494 parallel_insn = emit_insn (parallel_insn);
495
496 /* The insn rtx 'parallel_insn' will change frame layout.
497 We need to use RTX_FRAME_RELATED_P so that GCC is able to
498 generate CFI (Call Frame Information) stuff. */
499 RTX_FRAME_RELATED_P (parallel_insn) = 1;
500
501 /* Don't use GCC's logic for CFI info if we are generating a push for VAARG
502 since we will not restore those registers in the epilogue. */
503 if (vaarg_p)
504 {
505 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA,
506 copy_rtx (adjust_sp_rtx), NULL_RTX);
507 REG_NOTES (parallel_insn) = dwarf;
508 }
509 }
510
511 /* Function to create a parallel rtx pattern
512 which represents stack pop multiple behavior.
513 The overall concept is:
514 "pop registers from memory",
515 "adjust stack pointer". */
516 static void
517 nds32_emit_stack_pop_multiple (rtx Rb, rtx Re, rtx En4)
518 {
519 int regno;
520 int extra_count;
521 int num_use_regs;
522 int par_index;
523 int offset;
524 int save_fp, save_gp, save_lp;
525
526 rtx reg;
527 rtx mem;
528 rtx pop_rtx;
529 rtx adjust_sp_rtx;
530 rtx parallel_insn;
531 rtx dwarf = NULL_RTX;
532
533 /* We need to provide a customized rtx which contains
534 necessary information for data analysis,
535 so we create a parallel rtx like this:
536 (parallel [(set (reg:SI Rb)
537 (mem (reg:SI SP_REGNUM)))
538 (set (reg:SI Rb+1)
539 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
540 ...
541 (set (reg:SI Re)
542 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
543 (set (reg:SI FP_REGNUM)
544 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
545 (set (reg:SI GP_REGNUM)
546 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
547 (set (reg:SI LP_REGNUM)
548 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
549 (set (reg:SI SP_REGNUM)
550 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
551
552 /* Determine whether we need to restore $fp, $gp, or $lp. */
553 save_fp = INTVAL (En4) & 0x8;
554 save_gp = INTVAL (En4) & 0x4;
555 save_lp = INTVAL (En4) & 0x2;
556
557 /* Calculate the number of registers that will be popped. */
558 extra_count = 0;
559 if (save_fp)
560 extra_count++;
561 if (save_gp)
562 extra_count++;
563 if (save_lp)
564 extra_count++;
565 /* Note that Rb and Re may be SP_REGNUM. DO NOT count them in. */
566 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
567 num_use_regs = extra_count;
568 else
569 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
570
571 /* In addition to used registers,
572 we need one more space for (set sp sp+x) rtx. */
573 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
574 rtvec_alloc (num_use_regs + 1));
575 par_index = 0;
576
577 /* Initialize offset and start to create pop behavior. */
578 offset = 0;
579
580 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
581 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
582 {
583 /* Rb and Re may be SP_REGNUM.
584 We need to break this loop immediately. */
585 if (regno == SP_REGNUM)
586 break;
587
588 reg = gen_rtx_REG (SImode, regno);
589 mem = gen_frame_mem (SImode, plus_constant (Pmode,
590 stack_pointer_rtx,
591 offset));
592 pop_rtx = gen_rtx_SET (reg, mem);
593 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
594 RTX_FRAME_RELATED_P (pop_rtx) = 1;
595 offset = offset + 4;
596 par_index++;
597
598 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
599 }
600
601 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
602 if (save_fp)
603 {
604 reg = gen_rtx_REG (SImode, FP_REGNUM);
605 mem = gen_frame_mem (SImode, plus_constant (Pmode,
606 stack_pointer_rtx,
607 offset));
608 pop_rtx = gen_rtx_SET (reg, mem);
609 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
610 RTX_FRAME_RELATED_P (pop_rtx) = 1;
611 offset = offset + 4;
612 par_index++;
613
614 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
615 }
616 if (save_gp)
617 {
618 reg = gen_rtx_REG (SImode, GP_REGNUM);
619 mem = gen_frame_mem (SImode, plus_constant (Pmode,
620 stack_pointer_rtx,
621 offset));
622 pop_rtx = gen_rtx_SET (reg, mem);
623 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
624 RTX_FRAME_RELATED_P (pop_rtx) = 1;
625 offset = offset + 4;
626 par_index++;
627
628 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
629 }
630 if (save_lp)
631 {
632 reg = gen_rtx_REG (SImode, LP_REGNUM);
633 mem = gen_frame_mem (SImode, plus_constant (Pmode,
634 stack_pointer_rtx,
635 offset));
636 pop_rtx = gen_rtx_SET (reg, mem);
637 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
638 RTX_FRAME_RELATED_P (pop_rtx) = 1;
639 offset = offset + 4;
640 par_index++;
641
642 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
643 }
644
645 /* Create (set sp sp+x). */
646
647 /* The offset value is already in place. No need to re-calculate it. */
648 adjust_sp_rtx
649 = gen_rtx_SET (stack_pointer_rtx,
650 plus_constant (Pmode, stack_pointer_rtx, offset));
651 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
652
653 /* Tell gcc we adjust SP in this insn. */
654 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
655
656 parallel_insn = emit_insn (parallel_insn);
657
658 /* The insn rtx 'parallel_insn' will change frame layout.
659 We need to use RTX_FRAME_RELATED_P so that GCC is able to
660 generate CFI (Call Frame Information) stuff. */
661 RTX_FRAME_RELATED_P (parallel_insn) = 1;
662
663 /* Add CFI info manually. */
664 REG_NOTES (parallel_insn) = dwarf;
665 }
666
667 /* Function to create a parallel rtx pattern
668 which represents stack v3push behavior.
669 The overall concept is:
670 "push registers to memory",
671 "adjust stack pointer". */
672 static void
673 nds32_emit_stack_v3push (rtx Rb,
674 rtx Re,
675 rtx En4 ATTRIBUTE_UNUSED,
676 rtx imm8u)
677 {
678 int regno;
679 int num_use_regs;
680 int par_index;
681 int offset;
682
683 rtx reg;
684 rtx mem;
685 rtx push_rtx;
686 rtx adjust_sp_rtx;
687 rtx parallel_insn;
688
689 /* We need to provide a customized rtx which contains
690 necessary information for data analysis,
691 so we create a parallel rtx like this:
692 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
693 (reg:SI Rb))
694 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
695 (reg:SI Rb+1))
696 ...
697 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
698 (reg:SI Re))
699 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
700 (reg:SI FP_REGNUM))
701 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
702 (reg:SI GP_REGNUM))
703 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
704 (reg:SI LP_REGNUM))
705 (set (reg:SI SP_REGNUM)
706 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
707
708 /* Calculate the number of registers that will be pushed.
709 Since $fp, $gp, and $lp are always pushed with the v3push instruction,
710 we need to count these three registers.
711 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
712 So there is no need to worry about Rb=Re=SP_REGNUM case. */
713 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
714
715 /* In addition to used registers,
716 we need one more space for (set sp sp-x-imm8u) rtx. */
717 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
718 rtvec_alloc (num_use_regs + 1));
719 par_index = 0;
720
721 /* Initialize offset and start to create push behavior. */
722 offset = -(num_use_regs * 4);
723
724 /* Create (set mem regX) from Rb, Rb+1 up to Re.
725 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
726 So there is no need to worry about Rb=Re=SP_REGNUM case. */
727 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
728 {
729 reg = gen_rtx_REG (SImode, regno);
730 mem = gen_frame_mem (SImode, plus_constant (Pmode,
731 stack_pointer_rtx,
732 offset));
733 push_rtx = gen_rtx_SET (mem, reg);
734 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
735 RTX_FRAME_RELATED_P (push_rtx) = 1;
736 offset = offset + 4;
737 par_index++;
738 }
739
740 /* Create (set mem fp). */
741 reg = gen_rtx_REG (SImode, FP_REGNUM);
742 mem = gen_frame_mem (SImode, plus_constant (Pmode,
743 stack_pointer_rtx,
744 offset));
745 push_rtx = gen_rtx_SET (mem, reg);
746 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
747 RTX_FRAME_RELATED_P (push_rtx) = 1;
748 offset = offset + 4;
749 par_index++;
750 /* Create (set mem gp). */
751 reg = gen_rtx_REG (SImode, GP_REGNUM);
752 mem = gen_frame_mem (SImode, plus_constant (Pmode,
753 stack_pointer_rtx,
754 offset));
755 push_rtx = gen_rtx_SET (mem, reg);
756 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
757 RTX_FRAME_RELATED_P (push_rtx) = 1;
758 offset = offset + 4;
759 par_index++;
760 /* Create (set mem lp). */
761 reg = gen_rtx_REG (SImode, LP_REGNUM);
762 mem = gen_frame_mem (SImode, plus_constant (Pmode,
763 stack_pointer_rtx,
764 offset));
765 push_rtx = gen_rtx_SET (mem, reg);
766 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
767 RTX_FRAME_RELATED_P (push_rtx) = 1;
768 offset = offset + 4;
769 par_index++;
770
771 /* Create (set sp sp-x-imm8u). */
772
773 /* We need to re-calculate the offset value again for adjustment. */
774 offset = -(num_use_regs * 4);
775 adjust_sp_rtx
776 = gen_rtx_SET (stack_pointer_rtx,
777 plus_constant (Pmode,
778 stack_pointer_rtx,
779 offset - INTVAL (imm8u)));
780 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
781 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
782
783 parallel_insn = emit_insn (parallel_insn);
784
785 /* The insn rtx 'parallel_insn' will change frame layout.
786 We need to use RTX_FRAME_RELATED_P so that GCC is able to
787 generate CFI (Call Frame Information) stuff. */
788 RTX_FRAME_RELATED_P (parallel_insn) = 1;
789 }
790
791 /* Function to create a parallel rtx pattern
792 which represents stack v3pop behavior.
793 The overall concept is:
794 "pop registers from memory",
795 "adjust stack pointer". */
796 static void
797 nds32_emit_stack_v3pop (rtx Rb,
798 rtx Re,
799 rtx En4 ATTRIBUTE_UNUSED,
800 rtx imm8u)
801 {
802 int regno;
803 int num_use_regs;
804 int par_index;
805 int offset;
806
807 rtx reg;
808 rtx mem;
809 rtx pop_rtx;
810 rtx adjust_sp_rtx;
811 rtx parallel_insn;
812 rtx dwarf = NULL_RTX;
813
814 /* We need to provide a customized rtx which contains
815 necessary information for data analysis,
816 so we create a parallel rtx like this:
817 (parallel [(set (reg:SI Rb)
818 (mem (reg:SI SP_REGNUM)))
819 (set (reg:SI Rb+1)
820 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
821 ...
822 (set (reg:SI Re)
823 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
824 (set (reg:SI FP_REGNUM)
825 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
826 (set (reg:SI GP_REGNUM)
827 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
828 (set (reg:SI LP_REGNUM)
829 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
830 (set (reg:SI SP_REGNUM)
831 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
832
833 /* Calculate the number of registers that will be popped.
834 Since $fp, $gp, and $lp are always popped with the v3pop instruction,
835 we need to count these three registers.
836 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
837 So there is no need to worry about Rb=Re=SP_REGNUM case. */
838 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
839
840 /* In addition to used registers,
841 we need one more space for (set sp sp+x+imm8u) rtx. */
842 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
843 rtvec_alloc (num_use_regs + 1));
844 par_index = 0;
845
846 /* Initialize offset and start to create pop behavior. */
847 offset = 0;
848
849 /* Create (set regX mem) from Rb, Rb+1 up to Re.
850 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
851 So there is no need to worry about Rb=Re=SP_REGNUM case. */
852 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
853 {
854 reg = gen_rtx_REG (SImode, regno);
855 mem = gen_frame_mem (SImode, plus_constant (Pmode,
856 stack_pointer_rtx,
857 offset));
858 pop_rtx = gen_rtx_SET (reg, mem);
859 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
860 RTX_FRAME_RELATED_P (pop_rtx) = 1;
861 offset = offset + 4;
862 par_index++;
863
864 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
865 }
866
867 /* Create (set fp mem). */
868 reg = gen_rtx_REG (SImode, FP_REGNUM);
869 mem = gen_frame_mem (SImode, plus_constant (Pmode,
870 stack_pointer_rtx,
871 offset));
872 pop_rtx = gen_rtx_SET (reg, mem);
873 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
874 RTX_FRAME_RELATED_P (pop_rtx) = 1;
875 offset = offset + 4;
876 par_index++;
877 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
878
879 /* Create (set gp mem). */
880 reg = gen_rtx_REG (SImode, GP_REGNUM);
881 mem = gen_frame_mem (SImode, plus_constant (Pmode,
882 stack_pointer_rtx,
883 offset));
884 pop_rtx = gen_rtx_SET (reg, mem);
885 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
886 RTX_FRAME_RELATED_P (pop_rtx) = 1;
887 offset = offset + 4;
888 par_index++;
889 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
890
891 /* Create (set lp mem). */
892 reg = gen_rtx_REG (SImode, LP_REGNUM);
893 mem = gen_frame_mem (SImode, plus_constant (Pmode,
894 stack_pointer_rtx,
895 offset));
896 pop_rtx = gen_rtx_SET (reg, mem);
897 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
898 RTX_FRAME_RELATED_P (pop_rtx) = 1;
899 offset = offset + 4;
900 par_index++;
901 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
902
903 /* Create (set sp sp+x+imm8u). */
904
905 /* The offset value is already in place. No need to re-calculate it. */
906 adjust_sp_rtx
907 = gen_rtx_SET (stack_pointer_rtx,
908 plus_constant (Pmode,
909 stack_pointer_rtx,
910 offset + INTVAL (imm8u)));
911 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
912
913 /* Tell gcc we adjust SP in this insn. */
914 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
915
916 parallel_insn = emit_insn (parallel_insn);
917
918 /* The insn rtx 'parallel_insn' will change frame layout.
919 We need to use RTX_FRAME_RELATED_P so that GCC is able to
920 generate CFI (Call Frame Information) stuff. */
921 RTX_FRAME_RELATED_P (parallel_insn) = 1;
922
923 /* Add CFI info manually. */
924 REG_NOTES (parallel_insn) = dwarf;
925 }
926
927 /* Function that may create more instructions
928 when adjusting the stack pointer by a large value.
929
930 In the nds32 target, 'addi' can be used for stack pointer
931 adjustment in the prologue/epilogue stage.
932 However, sometimes there are too many local variables so that
933 the adjustment value cannot fit in the 'addi' instruction.
934 One solution is to move the value into a register
935 and then use an 'add' instruction.
936 In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
937 Also, we need to return zero for the sp adjustment so that the
938 prologue/epilogue knows there is no need to create an 'addi' instruction. */
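/* An illustrative sketch (editorial, not emitted verbatim by this function):
   a small adjustment such as -40 is left to the caller as a single
       addi  $sp, $sp, -40
   whereas a large adjustment such as -70000 is expanded here into roughly
       movi  $ta, -70000   ! or whatever move sequence the constant requires
       add   $sp, $sp, $ta
   and this function then returns 0 so the caller emits no 'addi' at all.  */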
939 static int
940 nds32_force_addi_stack_int (int full_value)
941 {
942 int adjust_value;
943
944 rtx tmp_reg;
945 rtx sp_adjust_insn;
946
947 if (!satisfies_constraint_Is15 (GEN_INT (full_value)))
948 {
949 /* The value cannot fit in a single addi instruction.
950 Create more instructions to move the value into a register
951 and then add it to the stack pointer. */
952
953 /* $r15 is going to be temporary register to hold the value. */
954 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
955
956 /* Create one more instruction to move value
957 into the temporary register. */
958 emit_move_insn (tmp_reg, GEN_INT (full_value));
959
960 /* Create new 'add' rtx. */
961 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
962 stack_pointer_rtx,
963 tmp_reg);
964 /* Emit rtx into insn list and receive its transformed insn rtx. */
965 sp_adjust_insn = emit_insn (sp_adjust_insn);
966
967 /* At prologue, we need to tell GCC that this is frame related insn,
968 so that we can consider this instruction to output debug information.
969 If full_value is NEGATIVE, it means this function
970 is invoked by expand_prologue. */
971 if (full_value < 0)
972 {
973 /* Because (tmp_reg <- full_value) may be split into two
974 rtl patterns, we can not set its RTX_FRAME_RELATED_P.
975 We need to construct another (sp <- sp + full_value)
976 and then insert it into sp_adjust_insn's reg note to
977 represent a frame related expression.
978 GCC knows how to refer to it and output debug information. */
979
980 rtx plus_rtx;
981 rtx set_rtx;
982
983 plus_rtx = plus_constant (Pmode, stack_pointer_rtx, full_value);
984 set_rtx = gen_rtx_SET (stack_pointer_rtx, plus_rtx);
985 add_reg_note (sp_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);
986
987 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
988 }
989
990 /* We have used alternative way to adjust stack pointer value.
991 Return zero so that prologue/epilogue
992 will not generate other instructions. */
993 return 0;
994 }
995 else
996 {
997 /* The value is able to fit in an addi instruction.
998 However, remember to make it a positive value
999 because we want to return the 'adjustment' result. */
1000 adjust_value = (full_value < 0) ? (-full_value) : (full_value);
1001
1002 return adjust_value;
1003 }
1004 }
1005
1006 /* Return true if MODE/TYPE need double word alignment. */
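/* Editorial example, assuming the usual nds32 setting of PARM_BOUNDARY == 32:
   DImode and DFmode values are 64-bit aligned and therefore answer true
   here, while SImode and smaller modes answer false.  */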
1007 static bool
1008 nds32_needs_double_word_align (machine_mode mode, const_tree type)
1009 {
1010 unsigned int align;
1011
1012 /* Pick up the alignment according to the mode or type. */
1013 align = NDS32_MODE_TYPE_ALIGN (mode, type);
1014
1015 return (align > PARM_BOUNDARY);
1016 }
1017
1018 /* Return true if FUNC is a naked function. */
1019 static bool
1020 nds32_naked_function_p (tree func)
1021 {
1022 tree t;
1023
1024 if (TREE_CODE (func) != FUNCTION_DECL)
1025 abort ();
1026
1027 t = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
1028
1029 return (t != NULL_TREE);
1030 }
1031
1032 /* Function that checks whether 'X' is a valid address register.
1033 The variable 'STRICT' is very important for
1034 deciding which register numbers are allowed.
1035
1036 STRICT : true
1037 => We are in reload pass or after reload pass.
1038 The register number should be strictly limited in general registers.
1039
1040 STRICT : false
1041 => Before reload pass, we are free to use any register number. */
1042 static bool
1043 nds32_address_register_rtx_p (rtx x, bool strict)
1044 {
1045 int regno;
1046
1047 if (GET_CODE (x) != REG)
1048 return false;
1049
1050 regno = REGNO (x);
1051
1052 if (strict)
1053 return REGNO_OK_FOR_BASE_P (regno);
1054 else
1055 return true;
1056 }
1057
1058 /* Function that checks whether 'INDEX' is valid as an index rtx for an address.
1059
1060 OUTER_MODE : Machine mode of outer address rtx.
1061 INDEX : Check if this rtx is valid to be a index for address.
1062 STRICT : If it is true, we are in reload pass or after reload pass. */
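/* Editorial summary of the CONST_INT cases handled below: a byte access
   accepts any Is15 offset; a halfword access needs a halfword-aligned Is16
   offset; a word or doubleword access needs a word-aligned Is17 offset,
   and the doubleword case additionally checks that offset + 4 still fits.  */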
1063 static bool
1064 nds32_legitimate_index_p (machine_mode outer_mode,
1065 rtx index,
1066 bool strict)
1067 {
1068 int regno;
1069 rtx op0;
1070 rtx op1;
1071
1072 switch (GET_CODE (index))
1073 {
1074 case REG:
1075 regno = REGNO (index);
1076 /* If we are in reload pass or after reload pass,
1077 we need to limit it to general register. */
1078 if (strict)
1079 return REGNO_OK_FOR_INDEX_P (regno);
1080 else
1081 return true;
1082
1083 case CONST_INT:
1084 /* The alignment of the integer value is determined by 'outer_mode'. */
1085 if (GET_MODE_SIZE (outer_mode) == 1)
1086 {
1087 /* Further check if the value is legal for the 'outer_mode'. */
1088 if (!satisfies_constraint_Is15 (index))
1089 return false;
1090
1091 /* Passed all tests; the value is valid, return true. */
1092 return true;
1093 }
1094 if (GET_MODE_SIZE (outer_mode) == 2
1095 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
1096 {
1097 /* Further check if the value is legal for the 'outer_mode'. */
1098 if (!satisfies_constraint_Is16 (index))
1099 return false;
1100
1101 /* Passed all tests; the value is valid, return true. */
1102 return true;
1103 }
1104 if (GET_MODE_SIZE (outer_mode) == 4
1105 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1106 {
1107 /* Further check if the value is legal for the 'outer_mode'. */
1108 if (!satisfies_constraint_Is17 (index))
1109 return false;
1110
1111 /* Passed all tests; the value is valid, return true. */
1112 return true;
1113 }
1114 if (GET_MODE_SIZE (outer_mode) == 8
1115 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1116 {
1117 /* Further check if the value is legal for the 'outer_mode'. */
1118 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
1119 SImode)))
1120 return false;
1121
1122 /* Passed all tests; the value is valid, return true. */
1123 return true;
1124 }
1125
1126 return false;
1127
1128 case MULT:
1129 op0 = XEXP (index, 0);
1130 op1 = XEXP (index, 1);
1131
1132 if (REG_P (op0) && CONST_INT_P (op1))
1133 {
1134 int multiplier;
1135 multiplier = INTVAL (op1);
1136
1137 /* We only allow (mult reg const_int_1)
1138 or (mult reg const_int_2) or (mult reg const_int_4). */
1139 if (multiplier != 1 && multiplier != 2 && multiplier != 4)
1140 return false;
1141
1142 regno = REGNO (op0);
1143 /* Limit it in general registers if we are
1144 in reload pass or after reload pass. */
1145 if (strict)
1146 return REGNO_OK_FOR_INDEX_P (regno);
1147 else
1148 return true;
1149 }
1150
1151 return false;
1152
1153 case ASHIFT:
1154 op0 = XEXP (index, 0);
1155 op1 = XEXP (index, 1);
1156
1157 if (REG_P (op0) && CONST_INT_P (op1))
1158 {
1159 int sv;
1160 /* op1 is already the sv value for use to do left shift. */
1161 sv = INTVAL (op1);
1162
1163 /* We only allow (ashift reg const_int_0)
1164 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1165 if (sv != 0 && sv != 1 && sv != 2)
1166 return false;
1167
1168 regno = REGNO (op0);
1169 /* Limit it in general registers if we are
1170 in reload pass or after reload pass. */
1171 if (strict)
1172 return REGNO_OK_FOR_INDEX_P (regno);
1173 else
1174 return true;
1175 }
1176
1177 return false;
1178
1179 default:
1180 return false;
1181 }
1182 }
1183
1184 /* ------------------------------------------------------------------------ */
1185
1186 /* PART 3: Implement target hook stuff definitions. */
1187 \f
1188 /* Register Classes. */
1189
1190 static unsigned char
1191 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1192 machine_mode mode)
1193 {
1194 /* Return the maximum number of consecutive registers
1195 needed to represent "mode" in a register of "rclass". */
1196 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1197 }
1198
1199 static int
1200 nds32_register_priority (int hard_regno)
1201 {
1202 /* Encourage LRA to use r0-r7 when optimizing for size. */
1203 if (optimize_size && hard_regno < 8)
1204 return 4;
1205 return 3;
1206 }
1207
1208 \f
1209 /* Stack Layout and Calling Conventions. */
1210
1211 /* There are three kinds of pointer concepts used in the GCC compiler:
1212
1213 frame pointer: A pointer to the first location of local variables.
1214 stack pointer: A pointer to the top of a stack frame.
1215 argument pointer: A pointer to the incoming arguments.
1216
1217 In nds32 target calling convention, we are using 8-byte alignment.
1218 Besides, we would like each stack frame of a function to include:
1219
1220 [Block A]
1221 1. previous hard frame pointer
1222 2. return address
1223 3. callee-saved registers
1224 4. <padding bytes> (we will calculate it in nds32_compute_stack_frame()
1225 and save it at
1226 cfun->machine->callee_saved_area_padding_bytes)
1227
1228 [Block B]
1229 1. local variables
1230 2. spilling location
1231 3. <padding bytes> (it will be calculated by GCC itself)
1232 4. incoming arguments
1233 5. <padding bytes> (it will be calculated by GCC itself)
1234
1235 [Block C]
1236 1. <padding bytes> (it will be calculated by GCC itself)
1237 2. outgoing arguments
1238
1239 We 'wrap' these blocks together with
1240 hard frame pointer ($r28) and stack pointer ($r31).
1241 By applying the basic frame/stack/argument pointers concept,
1242 the layout of a stack frame should be like this:
1243
1244 | |
1245 old stack pointer -> ----
1246 | | \
1247 | | saved arguments for
1248 | | vararg functions
1249 | | /
1250 hard frame pointer -> --
1251 & argument pointer | | \
1252 | | previous hardware frame pointer
1253 | | return address
1254 | | callee-saved registers
1255 | | /
1256 frame pointer -> --
1257 | | \
1258 | | local variables
1259 | | and incoming arguments
1260 | | /
1261 --
1262 | | \
1263 | | outgoing
1264 | | arguments
1265 | | /
1266 stack pointer -> ----
1267
1268 $SFP and $AP are used to represent the frame pointer and the argument pointer,
1269 both of which will be eliminated to the hard frame pointer. */
1270
1271 /* -- Eliminating Frame Pointer and Arg Pointer. */
1272
1273 static bool
1274 nds32_can_eliminate (const int from_reg, const int to_reg)
1275 {
1276 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1277 return true;
1278
1279 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1280 return true;
1281
1282 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1283 return true;
1284
1285 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1286 return true;
1287
1288 return false;
1289 }
1290
1291 /* -- Passing Arguments in Registers. */
1292
1293 static rtx
1294 nds32_function_arg (cumulative_args_t ca, machine_mode mode,
1295 const_tree type, bool named)
1296 {
1297 unsigned int regno;
1298 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1299
1300 /* The last time this hook is called,
1301 it is called with MODE == VOIDmode. */
1302 if (mode == VOIDmode)
1303 return NULL_RTX;
1304
1305 /* For nameless arguments, we need to take care of them individually. */
1306 if (!named)
1307 {
1308 /* If we are under hard float abi, we have arguments passed on the
1309 stack and every situation can be handled by GCC itself. */
1310 if (TARGET_HARD_FLOAT)
1311 return NULL_RTX;
1312
1313 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1314 {
1315 /* If we still have enough registers to pass argument, pick up
1316 next available register number. */
1317 regno
1318 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1319 return gen_rtx_REG (mode, regno);
1320 }
1321
1322 /* No register available, return NULL_RTX.
1323 The compiler will use stack to pass argument instead. */
1324 return NULL_RTX;
1325 }
1326
1327 /* The following is to handle named argument.
1328 Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
1329 are different. */
1330 if (TARGET_HARD_FLOAT)
1331 {
1332 /* Currently we have not implemented hard float yet. */
1333 gcc_unreachable ();
1334 }
1335 else
1336 {
1337 /* For !TARGET_HARD_FLOAT calling convention, we always use GPR to pass
1338 argument. Since we allow passing arguments partially in registers,
1339 we can just return it if there are still registers available. */
1340 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1341 {
1342 /* Pick up the next available register number. */
1343 regno
1344 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1345 return gen_rtx_REG (mode, regno);
1346 }
1347
1348 }
1349
1350 /* No register available, return NULL_RTX.
1351 The compiler will use stack to pass argument instead. */
1352 return NULL_RTX;
1353 }
1354
1355 static bool
1356 nds32_must_pass_in_stack (machine_mode mode, const_tree type)
1357 {
1358 /* Return true if a type must be passed in memory.
1359 If it is NOT using hard float abi, small aggregates can be
1360 passed in a register even if we are calling a variadic function.
1361 So there is no need to take padding into consideration. */
1362 if (TARGET_HARD_FLOAT)
1363 return must_pass_in_stack_var_size_or_pad (mode, type);
1364 else
1365 return must_pass_in_stack_var_size (mode, type);
1366 }
1367
1368 static int
1369 nds32_arg_partial_bytes (cumulative_args_t ca, machine_mode mode,
1370 tree type, bool named ATTRIBUTE_UNUSED)
1371 {
1372 /* Returns the number of bytes at the beginning of an argument that
1373 must be put in registers. The value must be zero for arguments that are
1374 passed entirely in registers or that are entirely pushed on the stack.
1375 Besides, TARGET_FUNCTION_ARG for these arguments should return the
1376 first register to be used by the caller for this argument. */
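  /* An illustrative (editorial) example of the computation below: if an
     argument needs three registers but only two argument registers remain,
     we return 2 * UNITS_PER_WORD so two words travel in registers and the
     rest of the argument goes on the stack.  */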
1377 unsigned int needed_reg_count;
1378 unsigned int remaining_reg_count;
1379 CUMULATIVE_ARGS *cum;
1380
1381 cum = get_cumulative_args (ca);
1382
1383 /* Under the hard float abi, we had better have arguments passed entirely in
1384 registers or pushed on the stack so that we can reduce the complexity
1385 of dealing with cum->gpr_offset and cum->fpr_offset. */
1386 if (TARGET_HARD_FLOAT)
1387 return 0;
1388
1389 /* If we have already run out of argument registers, return zero
1390 so that the argument will be entirely pushed on the stack. */
1391 if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1392 >= NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS)
1393 return 0;
1394
1395 /* Calculate how many registers we need for this argument. */
1396 needed_reg_count = NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1397
1398 /* Calculate how many argument registers are left for passing arguments.
1399 Note that we should count from the next available register number. */
1400 remaining_reg_count
1401 = NDS32_MAX_GPR_REGS_FOR_ARGS
1402 - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1403 - NDS32_GPR_ARG_FIRST_REGNUM);
1404
1405 /* Note that we have to return the number of bytes, not the register count. */
1406 if (needed_reg_count > remaining_reg_count)
1407 return remaining_reg_count * UNITS_PER_WORD;
1408
1409 return 0;
1410 }
1411
1412 static void
1413 nds32_function_arg_advance (cumulative_args_t ca, machine_mode mode,
1414 const_tree type, bool named)
1415 {
1416 machine_mode sub_mode;
1417 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1418
1419 if (named)
1420 {
1421 /* We need to further check TYPE and MODE so that we can determine
1422 which kind of register we shall advance. */
1423 if (type && TREE_CODE (type) == COMPLEX_TYPE)
1424 sub_mode = TYPE_MODE (TREE_TYPE (type));
1425 else
1426 sub_mode = mode;
1427
1428 /* Under hard float abi, we may advance FPR registers. */
1429 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (sub_mode) == MODE_FLOAT)
1430 {
1431 /* Currently we have not implemented hard float yet. */
1432 gcc_unreachable ();
1433 }
1434 else
1435 {
1436 cum->gpr_offset
1437 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1438 - NDS32_GPR_ARG_FIRST_REGNUM
1439 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1440 }
1441 }
1442 else
1443 {
1444 /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
1445 we can advance to the next register as well so that the caller is
1446 able to pass arguments in registers and the callee must be
1447 in charge of pushing all of them onto the stack. */
1448 if (!TARGET_HARD_FLOAT)
1449 {
1450 cum->gpr_offset
1451 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1452 - NDS32_GPR_ARG_FIRST_REGNUM
1453 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1454 }
1455 }
1456 }
1457
1458 static unsigned int
1459 nds32_function_arg_boundary (machine_mode mode, const_tree type)
1460 {
1461 return (nds32_needs_double_word_align (mode, type)
1462 ? NDS32_DOUBLE_WORD_ALIGNMENT
1463 : PARM_BOUNDARY);
1464 }
1465
1466 /* -- How Scalar Function Values Are Returned. */
1467
1468 static rtx
1469 nds32_function_value (const_tree ret_type,
1470 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1471 bool outgoing ATTRIBUTE_UNUSED)
1472 {
1473 machine_mode mode;
1474 int unsignedp;
1475
1476 mode = TYPE_MODE (ret_type);
1477 unsignedp = TYPE_UNSIGNED (ret_type);
1478
1479 mode = promote_mode (ret_type, mode, &unsignedp);
1480
1481 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1482 }
1483
1484 static rtx
1485 nds32_libcall_value (machine_mode mode,
1486 const_rtx fun ATTRIBUTE_UNUSED)
1487 {
1488 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1489 }
1490
1491 static bool
1492 nds32_function_value_regno_p (const unsigned int regno)
1493 {
1494 return (regno == NDS32_GPR_RET_FIRST_REGNUM);
1495 }
1496
1497 /* -- Function Entry and Exit. */
1498
1499 /* The content produced from this function
1500 will be placed before prologue body. */
1501 static void
1502 nds32_asm_function_prologue (FILE *file,
1503 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
1504 {
1505 int r;
1506 const char *func_name;
1507 tree attrs;
1508 tree name;
1509
1510 /* All stack frame information is supposed to be
1511 already computed when expanding prologue.
1512 The result is in cfun->machine.
1513 DO NOT call nds32_compute_stack_frame() here
1514 because it may corrupt the essential information. */
1515
1516 fprintf (file, "\t! BEGIN PROLOGUE\n");
1517 fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
1518 fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
1519 fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
1520 fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);
1521
1522 /* Use df_regs_ever_live_p() to detect if the register
1523 is ever used in the current function. */
1524 fprintf (file, "\t! registers ever_live: ");
1525 for (r = 0; r < 32; r++)
1526 {
1527 if (df_regs_ever_live_p (r))
1528 fprintf (file, "%s, ", reg_names[r]);
1529 }
1530 fputc ('\n', file);
1531
1532 /* Display the attributes of this function. */
1533 fprintf (file, "\t! function attributes: ");
1534 /* Get the attributes tree list.
1535 Note that GCC builds the attributes list in reverse order. */
1536 attrs = DECL_ATTRIBUTES (current_function_decl);
1537
1538 /* If there are no attributes, print out "None". */
1539 if (!attrs)
1540 fprintf (file, "None");
1541
1542 /* If there are some attributes, check whether we need to
1543 construct isr vector information. */
1544 func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
1545 nds32_construct_isr_vectors_information (attrs, func_name);
1546
1547 /* Display all attributes of this function. */
1548 while (attrs)
1549 {
1550 name = TREE_PURPOSE (attrs);
1551 fprintf (file, "%s ", IDENTIFIER_POINTER (name));
1552
1553 /* Pick up the next attribute. */
1554 attrs = TREE_CHAIN (attrs);
1555 }
1556 fputc ('\n', file);
1557 }
1558
1559 /* After rtl prologue has been expanded, this function is used. */
1560 static void
1561 nds32_asm_function_end_prologue (FILE *file)
1562 {
1563 fprintf (file, "\t! END PROLOGUE\n");
1564
1565 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1566 we can generate special directive: ".omit_fp_begin"
1567 to guide the linker in doing fp-as-gp optimization.
1568 However, for a naked function, which means
1569 it should not have prologue/epilogue,
1570 using fp-as-gp still requires saving $fp by push/pop behavior and
1571 there is no benefit in using fp-as-gp on such a small function.
1572 So we need to make sure this function is NOT naked as well. */
1573 if (!frame_pointer_needed
1574 && !cfun->machine->naked_p
1575 && cfun->machine->fp_as_gp_p)
1576 {
1577 fprintf (file, "\t! ----------------------------------------\n");
1578 fprintf (file, "\t! Guide linker to do "
1579 "link time optimization: fp-as-gp\n");
1580 fprintf (file, "\t! We add one more instruction to "
1581 "initialize $fp near to $gp location.\n");
1582 fprintf (file, "\t! If linker fails to use fp-as-gp transformation,\n");
1583 fprintf (file, "\t! this extra instruction should be "
1584 "eliminated at link stage.\n");
1585 fprintf (file, "\t.omit_fp_begin\n");
1586 fprintf (file, "\tla\t$fp,_FP_BASE_\n");
1587 fprintf (file, "\t! ----------------------------------------\n");
1588 }
1589 }
1590
1591 /* Before rtl epilogue has been expanded, this function is used. */
1592 static void
1593 nds32_asm_function_begin_epilogue (FILE *file)
1594 {
1595 /* If frame pointer is NOT needed and -mfp-as-gp is issued,
1596 we can generate special directive: ".omit_fp_end"
1597 to claim fp-as-gp optimization range.
1598 However, for a naked function,
1599 which means it should not have prologue/epilogue,
1600 using fp-as-gp still requires saving $fp by push/pop behavior and
1601 there is no benefit in using fp-as-gp on such a small function.
1602 So we need to make sure this function is NOT naked as well. */
1603 if (!frame_pointer_needed
1604 && !cfun->machine->naked_p
1605 && cfun->machine->fp_as_gp_p)
1606 {
1607 fprintf (file, "\t! ----------------------------------------\n");
1608 fprintf (file, "\t! Claim the range of fp-as-gp "
1609 "link time optimization\n");
1610 fprintf (file, "\t.omit_fp_end\n");
1611 fprintf (file, "\t! ----------------------------------------\n");
1612 }
1613
1614 fprintf (file, "\t! BEGIN EPILOGUE\n");
1615 }
1616
1617 /* The content produced from this function
1618 will be placed after epilogue body. */
1619 static void
1620 nds32_asm_function_epilogue (FILE *file,
1621 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
1622 {
1623 fprintf (file, "\t! END EPILOGUE\n");
1624 }
1625
1626 static void
1627 nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
1628 HOST_WIDE_INT delta,
1629 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
1630 tree function)
1631 {
1632 int this_regno;
1633
1634 /* Make sure unwind info is emitted for the thunk if needed. */
1635 final_start_function (emit_barrier (), file, 1);
1636
1637 this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
1638 ? 1
1639 : 0);
1640
1641 if (delta != 0)
1642 {
1643 if (satisfies_constraint_Is15 (GEN_INT (delta)))
1644 {
1645 fprintf (file, "\taddi\t$r%d, $r%d, %ld\n",
1646 this_regno, this_regno, delta);
1647 }
1648 else if (satisfies_constraint_Is20 (GEN_INT (delta)))
1649 {
1650 fprintf (file, "\tmovi\t$ta, %ld\n", delta);
1651 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1652 }
1653 else
1654 {
1655 fprintf (file, "\tsethi\t$ta, hi20(%ld)\n", delta);
1656 fprintf (file, "\tori\t$ta, $ta, lo12(%ld)\n", delta);
1657 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1658 }
1659 }
1660
1661 fprintf (file, "\tb\t");
1662 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
1663 fprintf (file, "\n");
1664
1665 final_end_function ();
1666 }
1667
1668 /* -- Permitting tail calls. */
1669
1670 /* Determine whether we need to enable warning for function return check. */
1671 static bool
1672 nds32_warn_func_return (tree decl)
1673 {
1674 /* Naked functions are implemented entirely in assembly, including the
1675 return sequence, so suppress warnings about this. */
1676 return !nds32_naked_function_p (decl);
1677 }
1678
1679 \f
1680 /* Implementing the Varargs Macros. */
1681
1682 static void
1683 nds32_setup_incoming_varargs (cumulative_args_t ca,
1684 machine_mode mode,
1685 tree type,
1686 int *pretend_args_size,
1687 int second_time ATTRIBUTE_UNUSED)
1688 {
1689 unsigned int total_args_regs;
1690 unsigned int num_of_used_regs;
1691 unsigned int remaining_reg_count;
1692 CUMULATIVE_ARGS *cum;
1693
1694 /* If we are under the hard float abi, we do not need to set *pretend_args_size,
1695 so that all nameless arguments are pushed by the caller and every situation
1696 can be handled by GCC itself. */
1697 if (TARGET_HARD_FLOAT)
1698 return;
1699
1700 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
1701 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
1702 However, for nameless (anonymous) arguments, we should push them on the
1703 stack so that all the nameless arguments appear to have been passed
1704 consecutively in the memory for accessing. Hence, we need to check and
1705 exclude the registers that are used for named arguments. */
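  /* Illustrative example (editorial; assumes the usual nds32 configuration
     where the GPR argument registers are $r0-$r5): for
         int f (int a, ...);
     the named argument 'a' occupies $r0, so num_of_used_regs points just
     past $r0 and *pretend_args_size becomes 5 * UNITS_PER_WORD = 20, the
     space needed to spill the remaining argument registers in the prologue.  */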
1706
1707 cum = get_cumulative_args (ca);
1708
1709 /* The MODE and TYPE describe the last argument.
1710 We need that information to determine the remaining registers
1711 for varargs. */
1712 total_args_regs
1713 = NDS32_MAX_GPR_REGS_FOR_ARGS + NDS32_GPR_ARG_FIRST_REGNUM;
1714 num_of_used_regs
1715 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1716 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1717
1718 remaining_reg_count = total_args_regs - num_of_used_regs;
1719 *pretend_args_size = remaining_reg_count * UNITS_PER_WORD;
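/* Illustrative arithmetic for the computation above (not from the original
   source, and assuming the usual NDS32 setup where NDS32_MAX_GPR_REGS_FOR_ARGS
   is 6 and argument registers start at $r0): if the named arguments end after
   $r1, num_of_used_regs is 2, so remaining_reg_count is 4 and
   *pretend_args_size becomes 4 * UNITS_PER_WORD = 16 bytes, i.e. $r2..$r5 are
   spilled so that va_arg can walk them in memory.  */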
1720
1721 return;
1722 }
1723
1724 static bool
1725 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1726 {
1727 /* If this hook returns true, the 'named' argument of FUNCTION_ARG is always
1728 true for named arguments, and false for unnamed arguments. */
1729 return true;
1730 }
1731
1732 \f
1733 /* Trampolines for Nested Functions. */
1734
1735 static void
1736 nds32_asm_trampoline_template (FILE *f)
1737 {
1738 if (TARGET_REDUCED_REGS)
1739 {
1740 /* Trampoline is not supported on reduced-set registers yet. */
1741 sorry ("a nested function is not supported for reduced registers");
1742 }
1743 else
1744 {
1745 asm_fprintf (f, "\t! Trampoline code template\n");
1746 asm_fprintf (f, "\t! This code fragment will be copied "
1747 "into stack on demand\n");
1748
1749 asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
1750 asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
1751 "! load nested function address\n");
1752 asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
1753 "! load chain_value\n");
1754 asm_fprintf (f, "\tjr\t$r15\n");
1755 }
1756
1757 /* Preserve space ($pc + 16) for saving chain_value;
1758 nds32_trampoline_init will fill in the value for this slot. */
1759 asm_fprintf (f, "\t! space for saving chain_value\n");
1760 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1761
1762 /* Preserve space ($pc + 20) for saving the nested function address;
1763 nds32_trampoline_init will fill in the value for this slot. */
1764 asm_fprintf (f, "\t! space for saving nested function address\n");
1765 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1766 }
1767
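/* A sketch of the trampoline layout produced above (assuming each of the four
   emitted instructions is 4 bytes, which is what the +16/+20 offsets imply):
     $pc +  0 .. +15 : the code template (mfusr / lwi / lwi / jr)
     $pc + 16        : chain_value slot, filled by nds32_trampoline_init
     $pc + 20        : nested function address slot, also filled by
                       nds32_trampoline_init.  */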
1768 /* Emit RTL insns to initialize the variable parts of a trampoline. */
1769 static void
1770 nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
1771 {
1772 int i;
1773
1774 /* Nested function address. */
1775 rtx fnaddr;
1776 /* The memory rtx that is going to
1777 be filled with chain_value. */
1778 rtx chain_value_mem;
1779 /* The memory rtx that is going to
1780 be filled with nested function address. */
1781 rtx nested_func_mem;
1782
1783 /* Start address of trampoline code in stack, for doing cache sync. */
1784 rtx sync_cache_addr;
1785 /* Temporary register for sync instruction. */
1786 rtx tmp_reg;
1787 /* Instruction-cache sync instruction,
1788 requesting an argument as starting address. */
1789 rtx isync_insn;
1790 /* Trampoline alignment in bytes, for convenient comparison below. */
1791 int tramp_align_in_bytes;
1792
1793 /* Trampoline is not supported on reduced-set registers yet. */
1794 if (TARGET_REDUCED_REGS)
1795 sorry ("a nested function is not supported for reduced registers");
1796
1797 /* STEP 1: Copy trampoline code template into stack,
1798 fill up essential data into stack. */
1799
1800 /* Extract nested function address rtx. */
1801 fnaddr = XEXP (DECL_RTL (fndecl), 0);
1802
1803 /* m_tramp is memory rtx that is going to be filled with trampoline code.
1804 We have nds32_asm_trampoline_template() to emit template pattern. */
1805 emit_block_move (m_tramp, assemble_trampoline_template (),
1806 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
1807
1808 /* After copying trampoline code into stack,
1809 fill chain_value into stack. */
1810 chain_value_mem = adjust_address (m_tramp, SImode, 16);
1811 emit_move_insn (chain_value_mem, chain_value);
1812 /* After copying trampoline code into stack,
1813 fill nested function address into stack. */
1814 nested_func_mem = adjust_address (m_tramp, SImode, 20);
1815 emit_move_insn (nested_func_mem, fnaddr);
1816
1817 /* STEP 2: Sync instruction-cache. */
1818
1819 /* We have successfully filled trampoline code into stack.
1820 However, in order to execute code in stack correctly,
1821 we must sync instruction cache. */
1822 sync_cache_addr = XEXP (m_tramp, 0);
1823 tmp_reg = gen_reg_rtx (SImode);
1824 isync_insn = gen_unspec_volatile_isync (tmp_reg);
1825
1826 /* Because nds32_cache_block_size is in bytes,
1827 we get trampoline alignment in bytes for convenient comparison. */
1828 tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;
1829
1830 if (tramp_align_in_bytes >= nds32_cache_block_size
1831 && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
1832 {
1833 /* Under this condition, the starting address of trampoline
1834 must be aligned to the starting address of each cache block
1835 and we do not have to worry about cross-boundary issue. */
1836 for (i = 0;
1837 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1838 / nds32_cache_block_size;
1839 i++)
1840 {
1841 emit_move_insn (tmp_reg,
1842 plus_constant (Pmode, sync_cache_addr,
1843 nds32_cache_block_size * i));
1844 emit_insn (isync_insn);
1845 }
1846 }
1847 else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
1848 {
1849 /* The starting address of trampoline code
1850 may not be aligned to the cache block,
1851 so the trampoline code may span two cache blocks.
1852 We need to sync the last element, which is 4 bytes in size,
1853 of the trampoline template. */
1854 for (i = 0;
1855 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1856 / nds32_cache_block_size;
1857 i++)
1858 {
1859 emit_move_insn (tmp_reg,
1860 plus_constant (Pmode, sync_cache_addr,
1861 nds32_cache_block_size * i));
1862 emit_insn (isync_insn);
1863 }
1864
1865 /* The last element of the trampoline template is 4 bytes in size. */
1866 emit_move_insn (tmp_reg,
1867 plus_constant (Pmode, sync_cache_addr,
1868 TRAMPOLINE_SIZE - 4));
1869 emit_insn (isync_insn);
1870 }
1871 else
1872 {
1873 /* This is the simplest case.
1874 Because TRAMPOLINE_SIZE is less than or
1875 equal to nds32_cache_block_size,
1876 we can just sync start address and
1877 the last element of trampoline code. */
1878
1879 /* Sync the starting address of trampoline code. */
1880 emit_move_insn (tmp_reg, sync_cache_addr);
1881 emit_insn (isync_insn);
1882 /* Sync the last element, which is 4 bytes in size,
1883 of the trampoline template. */
1884 emit_move_insn (tmp_reg,
1885 plus_constant (Pmode, sync_cache_addr,
1886 TRAMPOLINE_SIZE - 4));
1887 emit_insn (isync_insn);
1888 }
1889
1890 /* Set instruction serialization barrier
1891 to guarantee the correct operations. */
1892 emit_insn (gen_unspec_volatile_isb ());
1893 }
1894
1895 \f
1896 /* Addressing Modes. */
1897
1898 static bool
1899 nds32_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1900 {
1901 /* For (mem:DI addr) or (mem:DF addr) case,
1902 we only allow 'addr' to be [reg], [symbol_ref],
1903 [const], or [reg + const_int] pattern. */
1904 if (mode == DImode || mode == DFmode)
1905 {
1906 /* Allow [Reg + const_int] addressing mode. */
1907 if (GET_CODE (x) == PLUS)
1908 {
1909 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
1910 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
1911 && CONST_INT_P (XEXP (x, 1)))
1912 return true;
1913
1914 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
1915 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
1916 && CONST_INT_P (XEXP (x, 0)))
1917 return true;
1918 }
1919
1920 /* Now check [reg], [symbol_ref], and [const]. */
1921 if (GET_CODE (x) != REG
1922 && GET_CODE (x) != SYMBOL_REF
1923 && GET_CODE (x) != CONST)
1924 return false;
1925 }
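/* Illustrative examples for the DImode/DFmode restriction above: an address
   such as (mem:DF (post_inc (reg))) is rejected right here, whereas
   (mem:DF (plus (reg) (const_int 8))) can already have returned true above
   (provided the offset is a legitimate index); only [reg], [symbol_ref] and
   [const] forms fall through to the switch below for these modes.  */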
1926
1927 /* Check if 'x' is a valid address. */
1928 switch (GET_CODE (x))
1929 {
1930 case REG:
1931 /* (mem (reg A)) => [Ra] */
1932 return nds32_address_register_rtx_p (x, strict);
1933
1934 case SYMBOL_REF:
1935 /* (mem (symbol_ref A)) => [symbol_ref] */
1936 /* If -mcmodel=large, the 'symbol_ref' is not a valid address
1937 during or after LRA/reload phase. */
1938 if (TARGET_CMODEL_LARGE
1939 && (reload_completed
1940 || reload_in_progress
1941 || lra_in_progress))
1942 return false;
1943 /* If -mcmodel=medium and the symbol refers to the rodata section,
1944 the 'symbol_ref' is not a valid address during or after
1945 the LRA/reload phase. */
1946 if (TARGET_CMODEL_MEDIUM
1947 && NDS32_SYMBOL_REF_RODATA_P (x)
1948 && (reload_completed
1949 || reload_in_progress
1950 || lra_in_progress))
1951 return false;
1952
1953 return true;
1954
1955 case CONST:
1956 /* (mem (const (...)))
1957 => [ + const_addr ], where const_addr = symbol_ref + const_int */
1958 if (GET_CODE (XEXP (x, 0)) == PLUS)
1959 {
1960 rtx plus_op = XEXP (x, 0);
1961
1962 rtx op0 = XEXP (plus_op, 0);
1963 rtx op1 = XEXP (plus_op, 1);
1964
1965 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
1966 {
1967 /* Now we see the [ + const_addr ] pattern, but we need
1968 some further checking. */
1969 /* If -mcmodel=large, the 'const_addr' is not a valid address
1970 during or after LRA/reload phase. */
1971 if (TARGET_CMODEL_LARGE
1972 && (reload_completed
1973 || reload_in_progress
1974 || lra_in_progress))
1975 return false;
1976 /* If -mcmodel=medium and the symbol refers to the rodata section,
1977 the 'const_addr' is not a valid address during or after
1978 the LRA/reload phase. */
1979 if (TARGET_CMODEL_MEDIUM
1980 && NDS32_SYMBOL_REF_RODATA_P (op0)
1981 && (reload_completed
1982 || reload_in_progress
1983 || lra_in_progress))
1984 return false;
1985
1986 /* At this point we can make sure 'const_addr' is a
1987 valid address. */
1988 return true;
1989 }
1990 }
1991
1992 return false;
1993
1994 case POST_MODIFY:
1995 /* (mem (post_modify (reg) (plus (reg) (reg))))
1996 => [Ra], Rb */
1997 /* (mem (post_modify (reg) (plus (reg) (const_int))))
1998 => [Ra], const_int */
1999 if (GET_CODE (XEXP (x, 0)) == REG
2000 && GET_CODE (XEXP (x, 1)) == PLUS)
2001 {
2002 rtx plus_op = XEXP (x, 1);
2003
2004 rtx op0 = XEXP (plus_op, 0);
2005 rtx op1 = XEXP (plus_op, 1);
2006
2007 if (nds32_address_register_rtx_p (op0, strict)
2008 && nds32_legitimate_index_p (mode, op1, strict))
2009 return true;
2010 else
2011 return false;
2012 }
2013
2014 return false;
2015
2016 case POST_INC:
2017 case POST_DEC:
2018 /* (mem (post_inc reg)) => [Ra], 1/2/4 */
2019 /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
2020 /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2021 We only need to deal with register Ra. */
2022 if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
2023 return true;
2024 else
2025 return false;
2026
2027 case PLUS:
2028 /* (mem (plus reg const_int))
2029 => [Ra + imm] */
2030 /* (mem (plus reg reg))
2031 => [Ra + Rb] */
2032 /* (mem (plus (mult reg const_int) reg))
2033 => [Ra + Rb << sv] */
2034 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
2035 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
2036 return true;
2037 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
2038 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
2039 return true;
2040 else
2041 return false;
2042
2043 case LO_SUM:
2044 /* (mem (lo_sum (reg) (symbol_ref))) */
2045 /* (mem (lo_sum (reg) (const))) */
2046 gcc_assert (REG_P (XEXP (x, 0)));
2047 if (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
2048 || GET_CODE (XEXP (x, 1)) == CONST)
2049 return nds32_legitimate_address_p (mode, XEXP (x, 1), strict);
2050 else
2051 return false;
2052
2053 default:
2054 return false;
2055 }
2056 }
2057
2058 \f
2059 /* Describing Relative Costs of Operations. */
2060
2061 static int
2062 nds32_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2063 reg_class_t from,
2064 reg_class_t to)
2065 {
2066 if (from == HIGH_REGS || to == HIGH_REGS)
2067 return 6;
2068
2069 return 2;
2070 }
2071
2072 static int
2073 nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2074 reg_class_t rclass ATTRIBUTE_UNUSED,
2075 bool in ATTRIBUTE_UNUSED)
2076 {
2077 return 8;
2078 }
2079
2080 /* This target hook describes the relative costs of RTL expressions.
2081 Return 'true' when all subexpressions of x have been processed.
2082 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
2083 Refer to gcc/rtlanal.c for more information. */
2084 static bool
2085 nds32_rtx_costs (rtx x,
2086 machine_mode mode,
2087 int outer_code,
2088 int opno,
2089 int *total,
2090 bool speed)
2091 {
2092 return nds32_rtx_costs_impl (x, mode, outer_code, opno, total, speed);
2093 }
2094
2095 static int
2096 nds32_address_cost (rtx address,
2097 machine_mode mode,
2098 addr_space_t as,
2099 bool speed)
2100 {
2101 return nds32_address_cost_impl (address, mode, as, speed);
2102 }
2103
2104 \f
2105 /* Dividing the Output into Sections (Texts, Data, . . . ). */
2106
2107 /* If references to a symbol or a constant must be treated differently
2108 depending on something about the variable or function named by the symbol
2109 (such as what section it is in), we use this hook to store flags
2110 in symbol_ref rtx. */
2111 static void
2112 nds32_encode_section_info (tree decl, rtx rtl, int new_decl_p)
2113 {
2114 default_encode_section_info (decl, rtl, new_decl_p);
2115
2116 /* For the memory rtx, if it refers to the rodata section, we can store
2117 NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
2118 nds32_legitimate_address_p() can determine how to treat such symbol_ref
2119 based on -mcmodel=X and this information. */
2120 if (MEM_P (rtl) && MEM_READONLY_P (rtl))
2121 {
2122 rtx addr = XEXP (rtl, 0);
2123
2124 if (GET_CODE (addr) == SYMBOL_REF)
2125 {
2126 /* For (mem (symbol_ref X)) case. */
2127 SYMBOL_REF_FLAGS (addr) |= NDS32_SYMBOL_FLAG_RODATA;
2128 }
2129 else if (GET_CODE (addr) == CONST
2130 && GET_CODE (XEXP (addr, 0)) == PLUS)
2131 {
2132 /* For (mem (const (plus (symbol_ref X) (const_int N)))) case. */
2133 rtx plus_op = XEXP (addr, 0);
2134 rtx op0 = XEXP (plus_op, 0);
2135 rtx op1 = XEXP (plus_op, 1);
2136
2137 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
2138 SYMBOL_REF_FLAGS (op0) |= NDS32_SYMBOL_FLAG_RODATA;
2139 }
2140 }
2141 }
2142
2143 \f
2144 /* Defining the Output Assembler Language. */
2145
2146 /* -- The Overall Framework of an Assembler File. */
2147
2148 static void
2149 nds32_asm_file_start (void)
2150 {
2151 default_file_start ();
2152
2153 /* Tell assembler which ABI we are using. */
2154 fprintf (asm_out_file, "\t! ABI version\n");
2155 fprintf (asm_out_file, "\t.abi_2\n");
2156
2157 /* Tell assembler that this asm code is generated by compiler. */
2158 fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
2159 fprintf (asm_out_file, "\t.flag\tverbatim\n");
2160 /* Give assembler the size of each vector for interrupt handler. */
2161 fprintf (asm_out_file, "\t! This vector size directive is required "
2162 "for checking inconsistency on interrupt handler\n");
2163 fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);
2164
2165 fprintf (asm_out_file, "\t! ------------------------------------\n");
2166
2167 if (TARGET_ISA_V2)
2168 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
2169 if (TARGET_ISA_V3)
2170 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
2171 if (TARGET_ISA_V3M)
2172 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");
2173
2174 if (TARGET_CMODEL_SMALL)
2175 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "SMALL");
2176 if (TARGET_CMODEL_MEDIUM)
2177 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "MEDIUM");
2178 if (TARGET_CMODEL_LARGE)
2179 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "LARGE");
2180
2181 fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
2182 ((TARGET_BIG_ENDIAN) ? "big-endian"
2183 : "little-endian"));
2184
2185 fprintf (asm_out_file, "\t! ------------------------------------\n");
2186
2187 fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
2188 ((TARGET_CMOV) ? "Yes"
2189 : "No"));
2190 fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
2191 ((TARGET_PERF_EXT) ? "Yes"
2192 : "No"));
2193
2194 fprintf (asm_out_file, "\t! ------------------------------------\n");
2195
2196 fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
2197 ((TARGET_V3PUSH) ? "Yes"
2198 : "No"));
2199 fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
2200 ((TARGET_16_BIT) ? "Yes"
2201 : "No"));
2202 fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
2203 ((TARGET_REDUCED_REGS) ? "Yes"
2204 : "No"));
2205
2206 fprintf (asm_out_file, "\t! ------------------------------------\n");
2207
2208 if (optimize_size)
2209 fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
2210 else
2211 fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);
2212
2213 fprintf (asm_out_file, "\t! ------------------------------------\n");
2214
2215 fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
2216 nds32_cache_block_size);
2217
2218 fprintf (asm_out_file, "\t! ------------------------------------\n");
2219
2220 nds32_asm_file_start_for_isr ();
2221 }
2222
2223 static void
2224 nds32_asm_file_end (void)
2225 {
2226 nds32_asm_file_end_for_isr ();
2227
2228 fprintf (asm_out_file, "\t! ------------------------------------\n");
2229 }
2230
2231 /* -- Output and Generation of Labels. */
2232
2233 static void
2234 nds32_asm_globalize_label (FILE *stream, const char *name)
2235 {
2236 fputs ("\t.global\t", stream);
2237 assemble_name (stream, name);
2238 fputs ("\n", stream);
2239 }
2240
2241 /* -- Output of Assembler Instructions. */
2242
2243 static void
2244 nds32_print_operand (FILE *stream, rtx x, int code)
2245 {
2246 int op_value;
2247
2248 switch (code)
2249 {
2250 case 0 :
2251 /* Do nothing special. */
2252 break;
2253
2254 case 'V':
2255 /* 'x' is supposed to be CONST_INT, get the value. */
2256 gcc_assert (CONST_INT_P (x));
2257 op_value = INTVAL (x);
2258
2259 /* According to the Andes architecture,
2260 the system/user register index range is 0 ~ 1023.
2261 In order to avoid conflict between user-specified-integer value
2262 and enum-specified-register value,
2263 the 'enum nds32_intrinsic_registers' value
2264 in nds32_intrinsic.h starts from 1024. */
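          /* For example: a plain value 5 is printed as "5", while the value
             1024 (presumably __NDS32_REG_PSW__, the first enum entry) is
             printed as "$PSW" via the name table above.  */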
2265 if (op_value < 1024 && op_value >= 0)
2266 {
2267 /* If the user gives an integer value directly (0~1023),
2268 we just print out the value. */
2269 fprintf (stream, "%d", op_value);
2270 }
2271 else if (op_value < 0
2272 || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
2273 + 1024))
2274 {
2275 /* The enum index value is out of range of the register name array. */
2276 error ("intrinsic register index is out of range");
2277 }
2278 else
2279 {
2280 /* If the user uses the __NDS32_REG_XXX__ enum data in the normal way,
2281 we can print out the register name. Remember to subtract 1024. */
2282 fprintf (stream, "%s",
2283 nds32_intrinsic_register_names[op_value - 1024]);
2284 }
2285
2286 /* No further handling is needed, so return immediately. */
2287 return;
2288
2289 default :
2290 /* Unknown flag. */
2291 output_operand_lossage ("invalid operand output code");
2292 break;
2293 }
2294
2295 switch (GET_CODE (x))
2296 {
2297 case LABEL_REF:
2298 case SYMBOL_REF:
2299 output_addr_const (stream, x);
2300 break;
2301
2302 case REG:
2303 /* Forbid using static chain register ($r16)
2304 on reduced-set registers configuration. */
2305 if (TARGET_REDUCED_REGS
2306 && REGNO (x) == STATIC_CHAIN_REGNUM)
2307 sorry ("a nested function is not supported for reduced registers");
2308
2309 /* Normal cases, print out register name. */
2310 fputs (reg_names[REGNO (x)], stream);
2311 break;
2312
2313 case MEM:
2314 output_address (GET_MODE (x), XEXP (x, 0));
2315 break;
2316
2317 case CODE_LABEL:
2318 case CONST_INT:
2319 case CONST:
2320 output_addr_const (stream, x);
2321 break;
2322
2323 default:
2324 /* Generally, output_addr_const () is able to handle most cases.
2325 We want to see what CODE could appear,
2326 so we use gcc_unreachable() to stop it. */
2327 debug_rtx (x);
2328 gcc_unreachable ();
2329 break;
2330 }
2331 }
2332
2333 static void
2334 nds32_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x)
2335 {
2336 rtx op0, op1;
2337
2338 switch (GET_CODE (x))
2339 {
2340 case SYMBOL_REF:
2341 case CONST:
2342 /* [ + symbol_ref] */
2343 /* [ + const_addr], where const_addr = symbol_ref + const_int */
2344 fputs ("[ + ", stream);
2345 output_addr_const (stream, x);
2346 fputs ("]", stream);
2347 break;
2348
2349 case REG:
2350 /* Forbid using static chain register ($r16)
2351 on reduced-set registers configuration. */
2352 if (TARGET_REDUCED_REGS
2353 && REGNO (x) == STATIC_CHAIN_REGNUM)
2354 sorry ("a nested function is not supported for reduced registers");
2355
2356 /* [Ra] */
2357 fprintf (stream, "[%s]", reg_names[REGNO (x)]);
2358 break;
2359
2360 case PLUS:
2361 op0 = XEXP (x, 0);
2362 op1 = XEXP (x, 1);
2363
2364 /* Checking op0, forbid using static chain register ($r16)
2365 on reduced-set registers configuration. */
2366 if (TARGET_REDUCED_REGS
2367 && REG_P (op0)
2368 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2369 sorry ("a nested function is not supported for reduced registers");
2370 /* Checking op1, forbid using static chain register ($r16)
2371 on reduced-set registers configuration. */
2372 if (TARGET_REDUCED_REGS
2373 && REG_P (op1)
2374 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2375 sorry ("a nested function is not supported for reduced registers");
2376
2377 if (REG_P (op0) && CONST_INT_P (op1))
2378 {
2379 /* [Ra + imm] */
2380 fprintf (stream, "[%s + (%d)]",
2381 reg_names[REGNO (op0)], (int)INTVAL (op1));
2382 }
2383 else if (REG_P (op0) && REG_P (op1))
2384 {
2385 /* [Ra + Rb] */
2386 fprintf (stream, "[%s + %s]",
2387 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2388 }
2389 else if (GET_CODE (op0) == MULT && REG_P (op1))
2390 {
2391 /* [Ra + Rb << sv]
2392 From observation, the pattern looks like:
2393 (plus:SI (mult:SI (reg:SI 58)
2394 (const_int 4 [0x4]))
2395 (reg/f:SI 57)) */
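              /* Taking the observed pattern above as an example, the code below
                 prints it as "[<reg 57> + <reg 58> << 2]": the plain base
                 register first, then the scaled index register, with the shift
                 amount 2 derived from the const_int 4.  */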
2396 int sv;
2397
2398 /* We need to set sv to output shift value. */
2399 if (INTVAL (XEXP (op0, 1)) == 1)
2400 sv = 0;
2401 else if (INTVAL (XEXP (op0, 1)) == 2)
2402 sv = 1;
2403 else if (INTVAL (XEXP (op0, 1)) == 4)
2404 sv = 2;
2405 else
2406 gcc_unreachable ();
2407
2408 fprintf (stream, "[%s + %s << %d]",
2409 reg_names[REGNO (op1)],
2410 reg_names[REGNO (XEXP (op0, 0))],
2411 sv);
2412 }
2413 else
2414 {
2415 /* The control flow is not supposed to be here. */
2416 debug_rtx (x);
2417 gcc_unreachable ();
2418 }
2419
2420 break;
2421
2422 case POST_MODIFY:
2423 /* (post_modify (regA) (plus (regA) (regB)))
2424 (post_modify (regA) (plus (regA) (const_int)))
2425 We would like to extract
2426 regA and regB (or const_int) from plus rtx. */
2427 op0 = XEXP (XEXP (x, 1), 0);
2428 op1 = XEXP (XEXP (x, 1), 1);
2429
2430 /* Checking op0, forbid using static chain register ($r16)
2431 on reduced-set registers configuration. */
2432 if (TARGET_REDUCED_REGS
2433 && REG_P (op0)
2434 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2435 sorry ("a nested function is not supported for reduced registers");
2436 /* Checking op1, forbid using static chain register ($r16)
2437 on reduced-set registers configuration. */
2438 if (TARGET_REDUCED_REGS
2439 && REG_P (op1)
2440 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2441 sorry ("a nested function is not supported for reduced registers");
2442
2443 if (REG_P (op0) && REG_P (op1))
2444 {
2445 /* [Ra], Rb */
2446 fprintf (stream, "[%s], %s",
2447 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2448 }
2449 else if (REG_P (op0) && CONST_INT_P (op1))
2450 {
2451 /* [Ra], imm */
2452 fprintf (stream, "[%s], %d",
2453 reg_names[REGNO (op0)], (int)INTVAL (op1));
2454 }
2455 else
2456 {
2457 /* The control flow is not supposed to be here. */
2458 debug_rtx (x);
2459 gcc_unreachable ();
2460 }
2461
2462 break;
2463
2464 case POST_INC:
2465 case POST_DEC:
2466 op0 = XEXP (x, 0);
2467
2468 /* Checking op0, forbid using static chain register ($r16)
2469 on reduced-set registers configuration. */
2470 if (TARGET_REDUCED_REGS
2471 && REG_P (op0)
2472 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2473 sorry ("a nested function is not supported for reduced registers");
2474
2475 if (REG_P (op0))
2476 {
2477 /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
2478 The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2479 We only need to deal with register Ra. */
2480 fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
2481 }
2482 else
2483 {
2484 /* The control flow is not supposed to be here. */
2485 debug_rtx (x);
2486 gcc_unreachable ();
2487 }
2488
2489 break;
2490
2491 default :
2492 /* Generally, output_addr_const () is able to handle most cases.
2493 We want to see what CODE could appear,
2494 so we use gcc_unreachable() to stop it. */
2495 debug_rtx (x);
2496 gcc_unreachable ();
2497 break;
2498 }
2499 }
2500
2501 \f
2502 /* Defining target-specific uses of __attribute__. */
2503
2504 /* Add some checking after merging attributes. */
2505 static tree
2506 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
2507 {
2508 tree combined_attrs;
2509
2510 /* Create combined attributes. */
2511 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
2512 DECL_ATTRIBUTES (newdecl));
2513
2514 /* Since newdecl is actually a duplicate of olddecl,
2515 we can take olddecl for some operations. */
2516 if (TREE_CODE (olddecl) == FUNCTION_DECL)
2517 {
2518 /* Check isr-specific attributes conflict. */
2519 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
2520 }
2521
2522 return combined_attrs;
2523 }
2524
2525 /* Add some checking when inserting attributes. */
2526 static void
2527 nds32_insert_attributes (tree decl, tree *attributes)
2528 {
2529 /* For function declaration, we need to check isr-specific attributes:
2530 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
2531 2. Check valid integer value for interrupt/exception.
2532 3. Check valid integer value for reset.
2533 4. Check valid function for nmi/warm. */
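  /* Illustrative example (not from the original source): a declaration such as
       void __attribute__ ((interrupt (3), nested)) my_isr (void);
     reaches this hook with an "interrupt" attribute whose id list holds the
     single value 3, which is checked below against the 0..63 range.  */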
2534 if (TREE_CODE (decl) == FUNCTION_DECL)
2535 {
2536 tree func_attrs;
2537 tree intr, excp, reset;
2538
2539 /* Pick up function attributes. */
2540 func_attrs = *attributes;
2541
2542 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
2543 nds32_check_isr_attrs_conflict (decl, func_attrs);
2544
2545 /* Now we start to check the validity of the id values
2546 for interrupt/exception/reset.
2547 Note that we ONLY check their validity here.
2548 Constructing the isr vector information is still performed
2549 by nds32_construct_isr_vectors_information(). */
2550 intr = lookup_attribute ("interrupt", func_attrs);
2551 excp = lookup_attribute ("exception", func_attrs);
2552 reset = lookup_attribute ("reset", func_attrs);
2553
2554 if (intr || excp)
2555 {
2556 /* Deal with interrupt/exception. */
2557 tree id_list;
2558 unsigned int lower_bound, upper_bound;
2559
2560 /* The way to handle interrupts and exceptions is the same;
2561 we just need to take care of the actual vector number.
2562 For interrupt(0..63), the actual vector number is (9..72).
2563 For exception(1..8), the actual vector number is (1..8). */
2564 lower_bound = (intr) ? (0) : (1);
2565 upper_bound = (intr) ? (63) : (8);
2566
2567 /* Prepare id list so that we can traverse id value. */
2568 id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));
2569
2570 /* 2. Check valid integer value for interrupt/exception. */
2571 while (id_list)
2572 {
2573 tree id;
2574
2575 /* Pick up each vector id value. */
2576 id = TREE_VALUE (id_list);
2577 /* Issue error if it is not a valid integer value. */
2578 if (TREE_CODE (id) != INTEGER_CST
2579 || wi::ltu_p (id, lower_bound)
2580 || wi::gtu_p (id, upper_bound))
2581 error ("invalid id value for interrupt/exception attribute");
2582
2583 /* Advance to next id. */
2584 id_list = TREE_CHAIN (id_list);
2585 }
2586 }
2587 else if (reset)
2588 {
2589 /* Deal with reset. */
2590 tree id_list;
2591 tree id;
2592 tree nmi, warm;
2593 unsigned int lower_bound;
2594 unsigned int upper_bound;
2595
2596 /* Prepare id_list and identify the id value so that
2597 we can check whether the total number of vectors is valid. */
2598 id_list = TREE_VALUE (reset);
2599 id = TREE_VALUE (id_list);
2600
2601 /* The maximum number of user interrupts is 64. */
2602 lower_bound = 0;
2603 upper_bound = 64;
2604
2605 /* 3. Check valid integer value for reset. */
2606 if (TREE_CODE (id) != INTEGER_CST
2607 || wi::ltu_p (id, lower_bound)
2608 || wi::gtu_p (id, upper_bound))
2609 error ("invalid id value for reset attribute");
2610
2611 /* 4. Check valid function for nmi/warm. */
2612 nmi = lookup_attribute ("nmi", func_attrs);
2613 warm = lookup_attribute ("warm", func_attrs);
2614
2615 if (nmi != NULL_TREE)
2616 {
2617 tree nmi_func_list;
2618 tree nmi_func;
2619
2620 nmi_func_list = TREE_VALUE (nmi);
2621 nmi_func = TREE_VALUE (nmi_func_list);
2622
2623 /* Issue error if it is not a valid nmi function. */
2624 if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
2625 error ("invalid nmi function for reset attribute");
2626 }
2627
2628 if (warm != NULL_TREE)
2629 {
2630 tree warm_func_list;
2631 tree warm_func;
2632
2633 warm_func_list = TREE_VALUE (warm);
2634 warm_func = TREE_VALUE (warm_func_list);
2635
2636 /* Issue error if it is not a valid warm function. */
2637 if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
2638 error ("invalid warm function for reset attribute");
2639 }
2640 }
2641 else
2642 {
2643 /* No interrupt, exception, or reset attribute is set. */
2644 return;
2645 }
2646 }
2647 }
2648
2649 static bool
2650 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
2651 tree pop_target ATTRIBUTE_UNUSED)
2652 {
2653 /* Currently, we do not parse any target pragma ourselves,
2654 so simply return false. */
2655 return false;
2656 }
2657
2658 static void
2659 nds32_option_override (void)
2660 {
2661 /* After all the command-line options have been parsed,
2662 we shall deal with some flags for changing compiler settings. */
2663
2664 /* First, we check whether we have to strictly
2665 set some flags based on the ISA family. */
2666 if (TARGET_ISA_V2)
2667 {
2668 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2669 target_flags &= ~MASK_V3PUSH;
2670 }
2671 if (TARGET_ISA_V3)
2672 {
2673 /* Under V3 ISA, currently nothing should be strictly set. */
2674 }
2675 if (TARGET_ISA_V3M)
2676 {
2677 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2678 target_flags |= MASK_REDUCED_REGS;
2679 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
2680 target_flags &= ~MASK_PERF_EXT;
2681 }
2682
2683 /* See if we are using reduced-set registers:
2684 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2685 If so, we must forbid using $r11~$r14, $r16~$r27. */
2686 if (TARGET_REDUCED_REGS)
2687 {
2688 int r;
2689
2690 /* Prevent the register allocator from choosing these registers
2691 when doing register allocation. */
2692 for (r = 11; r <= 14; r++)
2693 fixed_regs[r] = call_used_regs[r] = 1;
2694 for (r = 16; r <= 27; r++)
2695 fixed_regs[r] = call_used_regs[r] = 1;
2696 }
2697
2698 if (!TARGET_16_BIT)
2699 {
2700 /* Without the 16-bit ISA, we need to strictly disable TARGET_V3PUSH. */
2701 target_flags &= ~MASK_V3PUSH;
2702 }
2703
2704 /* We do not support PIC code generation yet. */
2705 if (flag_pic)
2706 sorry ("-fpic is not supported");
2707 }
2708
2709 \f
2710 /* Miscellaneous Parameters. */
2711
2712 static void
2713 nds32_init_builtins (void)
2714 {
2715 nds32_init_builtins_impl ();
2716 }
2717
2718 static rtx
2719 nds32_expand_builtin (tree exp,
2720 rtx target,
2721 rtx subtarget,
2722 machine_mode mode,
2723 int ignore)
2724 {
2725 return nds32_expand_builtin_impl (exp, target, subtarget, mode, ignore);
2726 }
2727
2728
2729 /* ------------------------------------------------------------------------ */
2730
2731 /* PART 4: Implement extern function definitions,
2732 the prototype is in nds32-protos.h. */
2733 \f
2734 /* Defining Data Structures for Per-function Information. */
2735
2736 void
2737 nds32_init_expanders (void)
2738 {
2739 /* Arrange to initialize and mark the machine per-function status. */
2740 init_machine_status = nds32_init_machine_status;
2741 }
2742
2743 \f
2744 /* Register Usage. */
2745
2746 /* -- How Values Fit in Registers. */
2747
2748 int
2749 nds32_hard_regno_nregs (int regno ATTRIBUTE_UNUSED,
2750 machine_mode mode)
2751 {
2752 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
2753 }
2754
2755 int
2756 nds32_hard_regno_mode_ok (int regno, machine_mode mode)
2757 {
2758 /* Restrict double-word quantities to even register pairs. */
2759 if (HARD_REGNO_NREGS (regno, mode) == 1
2760 || !((regno) & 1))
2761 return 1;
2762
2763 return 0;
2764 }
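/* Example: a DImode value needs two registers, so it may start at an even
   register such as $r0 or $r2 but not at an odd one such as $r1; any
   single-word mode is accepted in any register.  */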
2765
2766 \f
2767 /* Register Classes. */
2768
2769 enum reg_class
2770 nds32_regno_reg_class (int regno)
2771 {
2772 /* Refer to nds32.h for more register class details. */
2773
2774 if (regno >= 0 && regno <= 7)
2775 return LOW_REGS;
2776 else if (regno >= 8 && regno <= 11)
2777 return MIDDLE_REGS;
2778 else if (regno >= 12 && regno <= 14)
2779 return HIGH_REGS;
2780 else if (regno == 15)
2781 return R15_TA_REG;
2782 else if (regno >= 16 && regno <= 19)
2783 return MIDDLE_REGS;
2784 else if (regno >= 20 && regno <= 31)
2785 return HIGH_REGS;
2786 else if (regno == 32 || regno == 33)
2787 return FRAME_REGS;
2788 else
2789 return NO_REGS;
2790 }
2791
2792 \f
2793 /* Stack Layout and Calling Conventions. */
2794
2795 /* -- Basic Stack Layout. */
2796
2797 rtx
2798 nds32_return_addr_rtx (int count,
2799 rtx frameaddr ATTRIBUTE_UNUSED)
2800 {
2801 /* There is no way to determine the return address
2802 if frameaddr is the frame that has 'count' steps
2803 up from current frame. */
2804 if (count != 0)
2805 return NULL_RTX;
2806
2807 /* If count == 0, it means we are at current frame,
2808 the return address is $r30 ($lp). */
2809 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
2810 }
2811
2812 /* -- Eliminating Frame Pointer and Arg Pointer. */
2813
2814 HOST_WIDE_INT
2815 nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
2816 {
2817 HOST_WIDE_INT offset;
2818
2819 /* Compute and setup stack frame size.
2820 The result will be in cfun->machine. */
2821 nds32_compute_stack_frame ();
2822
2823 /* Remember to consider
2824 cfun->machine->callee_saved_area_padding_bytes
2825 when calculating offset. */
2826 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
2827 {
2828 offset = (cfun->machine->fp_size
2829 + cfun->machine->gp_size
2830 + cfun->machine->lp_size
2831 + cfun->machine->callee_saved_gpr_regs_size
2832 + cfun->machine->callee_saved_area_gpr_padding_bytes
2833 + cfun->machine->local_size
2834 + cfun->machine->out_args_size);
2835 }
2836 else if (from_reg == ARG_POINTER_REGNUM
2837 && to_reg == HARD_FRAME_POINTER_REGNUM)
2838 {
2839 offset = 0;
2840 }
2841 else if (from_reg == FRAME_POINTER_REGNUM
2842 && to_reg == STACK_POINTER_REGNUM)
2843 {
2844 offset = (cfun->machine->local_size + cfun->machine->out_args_size);
2845 }
2846 else if (from_reg == FRAME_POINTER_REGNUM
2847 && to_reg == HARD_FRAME_POINTER_REGNUM)
2848 {
2849 offset = (-1) * (cfun->machine->fp_size
2850 + cfun->machine->gp_size
2851 + cfun->machine->lp_size
2852 + cfun->machine->callee_saved_gpr_regs_size
2853 + cfun->machine->callee_saved_area_gpr_padding_bytes);
2854 }
2855 else
2856 {
2857 gcc_unreachable ();
2858 }
2859
2860 return offset;
2861 }
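/* As a quick sanity check of the cases above (a restatement, not new logic):
   (AP -> SP) equals (AP -> HARD_FP) + (FP -> SP) - (FP -> HARD_FP), i.e.
   0 + (local + out_args) + (fp + gp + lp + callee-saved + padding),
   which matches the first case directly.  */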
2862
2863 /* -- Passing Arguments in Registers. */
2864
2865 void
2866 nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
2867 tree fntype ATTRIBUTE_UNUSED,
2868 rtx libname ATTRIBUTE_UNUSED,
2869 tree fndecl ATTRIBUTE_UNUSED,
2870 int n_named_args ATTRIBUTE_UNUSED)
2871 {
2872 /* Initial offset of available registers
2873 (counted from NDS32_GPR_ARG_FIRST_REGNUM)
2874 for passing arguments. */
2875 cum->gpr_offset = 0;
2876 }
2877
2878 /* -- Function Entry and Exit. */
2879
2880 /* Function for normal multiple push prologue. */
2881 void
2882 nds32_expand_prologue (void)
2883 {
2884 int fp_adjust;
2885 int sp_adjust;
2886 int en4_const;
2887
2888 rtx Rb, Re;
2889 rtx fp_adjust_insn, sp_adjust_insn;
2890
2891 /* Compute and setup stack frame size.
2892 The result will be in cfun->machine. */
2893 nds32_compute_stack_frame ();
2894
2895 /* If this is a variadic function, first we need to push argument
2896 registers that hold the unnamed argument value. */
2897 if (cfun->machine->va_args_size != 0)
2898 {
2899 Rb = gen_rtx_REG (SImode, cfun->machine->va_args_first_regno);
2900 Re = gen_rtx_REG (SImode, cfun->machine->va_args_last_regno);
2901 /* No need to push $fp, $gp, or $lp, so use GEN_INT(0). */
2902 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (0), true);
2903
2904 /* We may also need to adjust the stack pointer for padding bytes,
2905 because varargs may leave $sp not 8-byte aligned. */
2906 if (cfun->machine->va_args_area_padding_bytes)
2907 {
2908 /* Generate sp adjustment instruction. */
2909 sp_adjust = cfun->machine->va_args_area_padding_bytes;
2910 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2911 stack_pointer_rtx,
2912 GEN_INT (-1 * sp_adjust));
2913
2914 /* Emit rtx into instructions list and receive INSN rtx form. */
2915 sp_adjust_insn = emit_insn (sp_adjust_insn);
2916
2917 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2918 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2919 generate CFI (Call Frame Information) stuff. */
2920 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
2921 }
2922 }
2923
2924 /* If the function is 'naked',
2925 we do not have to generate prologue code fragment. */
2926 if (cfun->machine->naked_p)
2927 return;
2928
2929 /* Get callee_first_regno and callee_last_regno. */
2930 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
2931 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
2932
2933 /* nds32_emit_stack_push_multiple(first_regno, last_regno),
2934 the pattern 'stack_push_multiple' is implemented in nds32.md.
2935 For En4 field, we have to calculate its constant value.
2936 Refer to Andes ISA for more information. */
2937 en4_const = 0;
2938 if (cfun->machine->fp_size)
2939 en4_const += 8;
2940 if (cfun->machine->gp_size)
2941 en4_const += 4;
2942 if (cfun->machine->lp_size)
2943 en4_const += 2;
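  /* Worked example: a function that saves all of $fp, $gp and $lp gets
     en4_const = 8 + 4 + 2 = 14, which is the same encoding as the
     (const_int 14) used by the v3push/v3pop patterns later in this file.  */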
2944
2945 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2946 to be saved, we don't have to create multiple push instruction.
2947 Otherwise, a multiple push instruction is needed. */
2948 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
2949 {
2950 /* Create multiple push instruction rtx. */
2951 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (en4_const), false);
2952 }
2953
2954 /* Check frame_pointer_needed to see
2955 if we shall emit fp adjustment instruction. */
2956 if (frame_pointer_needed)
2957 {
2958 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2959 + (4 * callee-saved-registers)
2960 Note: No need to adjust
2961 cfun->machine->callee_saved_area_padding_bytes,
2962 because, at this point, stack pointer is just
2963 at the position after push instruction. */
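      /* Illustrative arithmetic (assuming each saved register occupies
         4 bytes): saving $fp, $gp and $lp plus two callee-saved GPRs gives
         fp_adjust = 4 + 4 + 4 + 8 = 20.  */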
2964 fp_adjust = cfun->machine->fp_size
2965 + cfun->machine->gp_size
2966 + cfun->machine->lp_size
2967 + cfun->machine->callee_saved_gpr_regs_size;
2968 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2969 stack_pointer_rtx,
2970 GEN_INT (fp_adjust));
2971 /* Emit rtx into instructions list and receive INSN rtx form. */
2972 fp_adjust_insn = emit_insn (fp_adjust_insn);
2973
2974 /* The insn rtx 'fp_adjust_insn' will change frame layout. */
2975 RTX_FRAME_RELATED_P (fp_adjust_insn) = 1;
2976 }
2977
2978 /* Adjust $sp = $sp - local_size - out_args_size
2979 - callee_saved_area_padding_bytes. */
2980 sp_adjust = cfun->machine->local_size
2981 + cfun->machine->out_args_size
2982 + cfun->machine->callee_saved_area_gpr_padding_bytes;
2983 /* The sp_adjust value may be out of range of the addi instruction;
2984 create an alternative add sequence with TA_REGNUM if necessary,
2985 using a NEGATIVE value to indicate that we are decreasing the address. */
2986 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
2987 if (sp_adjust)
2988 {
2989 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
2990 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2991 stack_pointer_rtx,
2992 GEN_INT (-1 * sp_adjust));
2993 /* Emit rtx into instructions list and receive INSN rtx form. */
2994 sp_adjust_insn = emit_insn (sp_adjust_insn);
2995
2996 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2997 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2998 generate CFI (Call Frame Information) stuff. */
2999 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3000 }
3001
3002 /* Prevent the instruction scheduler from
3003 moving instructions across the boundary. */
3004 emit_insn (gen_blockage ());
3005 }
3006
3007 /* Function for normal multiple pop epilogue. */
3008 void
3009 nds32_expand_epilogue (bool sibcall_p)
3010 {
3011 int sp_adjust;
3012 int en4_const;
3013
3014 rtx Rb, Re;
3015 rtx sp_adjust_insn;
3016
3017 /* Compute and setup stack frame size.
3018 The result will be in cfun->machine. */
3019 nds32_compute_stack_frame ();
3020
3021 /* Prevent the instruction scheduler from
3022 moving instructions across the boundary. */
3023 emit_insn (gen_blockage ());
3024
3025 /* If the function is 'naked', we do not have to generate any
3026 epilogue code fragment except the 'ret' instruction.
3027 However, if this function is also a variadic function,
3028 we need to adjust the stack pointer before the 'ret' instruction. */
3029 if (cfun->machine->naked_p)
3030 {
3031 /* If this is a variadic function, we do not have to restore argument
3032 registers but need to adjust stack pointer back to previous stack
3033 frame location before return. */
3034 if (cfun->machine->va_args_size != 0)
3035 {
3036 /* Generate sp adjustment instruction.
3037 We need to consider padding bytes here. */
3038 sp_adjust = cfun->machine->va_args_size
3039 + cfun->machine->va_args_area_padding_bytes;
3040 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3041 stack_pointer_rtx,
3042 GEN_INT (sp_adjust));
3043 /* Emit rtx into instructions list and receive INSN rtx form. */
3044 sp_adjust_insn = emit_insn (sp_adjust_insn);
3045
3046 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3047 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3048 generate CFI (Call Frame Information) stuff. */
3049 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3050 }
3051
3052 /* Generate return instruction by using 'return_internal' pattern.
3053 Make sure this instruction is after gen_blockage(). */
3054 if (!sibcall_p)
3055 emit_jump_insn (gen_return_internal ());
3056 return;
3057 }
3058
3059 if (frame_pointer_needed)
3060 {
3061 /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
3062 - (4 * callee-saved-registers)
3063 Note: No need to adjust
3064 cfun->machine->callee_saved_area_padding_bytes,
3065 because we want to adjust stack pointer
3066 to the position for pop instruction. */
3067 sp_adjust = cfun->machine->fp_size
3068 + cfun->machine->gp_size
3069 + cfun->machine->lp_size
3070 + cfun->machine->callee_saved_gpr_regs_size;
3071 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3072 hard_frame_pointer_rtx,
3073 GEN_INT (-1 * sp_adjust));
3074 /* Emit rtx into instructions list and receive INSN rtx form. */
3075 sp_adjust_insn = emit_insn (sp_adjust_insn);
3076
3077 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3078 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3079 }
3080 else
3081 {
3082 /* If frame pointer is NOT needed,
3083 we cannot calculate the sp adjustment from frame pointer.
3084 Instead, we calculate the adjustment by local_size,
3085 out_args_size, and callee_saved_area_padding_bytes.
3086 Notice that such sp adjustment value may be out of range,
3087 so we have to deal with it as well. */
3088
3089 /* Adjust $sp = $sp + local_size + out_args_size
3090 + callee_saved_area_padding_bytes. */
3091 sp_adjust = cfun->machine->local_size
3092 + cfun->machine->out_args_size
3093 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3094 /* The sp_adjust value may be out of range of the addi instruction;
3095 create an alternative add sequence with TA_REGNUM if necessary,
3096 using a POSITIVE value to indicate that we are increasing the address. */
3097 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
3098 if (sp_adjust)
3099 {
3100 /* Generate sp adjustment instruction
3101 if and only if sp_adjust != 0. */
3102 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3103 stack_pointer_rtx,
3104 GEN_INT (sp_adjust));
3105 /* Emit rtx into instructions list and receive INSN rtx form. */
3106 sp_adjust_insn = emit_insn (sp_adjust_insn);
3107
3108 /* The insn rtx 'sp_adjust_insn' will change frame layout. */
3109 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3110 }
3111 }
3112
3113 /* Get callee_first_regno and callee_last_regno. */
3114 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3115 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3116
3117 /* nds32_emit_stack_pop_multiple(first_regno, last_regno),
3118 the pattern 'stack_pop_multiple' is implemented in nds32.md.
3119 For En4 field, we have to calculate its constant value.
3120 Refer to Andes ISA for more information. */
3121 en4_const = 0;
3122 if (cfun->machine->fp_size)
3123 en4_const += 8;
3124 if (cfun->machine->gp_size)
3125 en4_const += 4;
3126 if (cfun->machine->lp_size)
3127 en4_const += 2;
3128
3129 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
3130 to be saved, we don't have to create multiple pop instruction.
3131 Otherwise, a multiple pop instruction is needed. */
3132 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
3133 {
3134 /* Create multiple pop instruction rtx. */
3135 nds32_emit_stack_pop_multiple (Rb, Re, GEN_INT (en4_const));
3136 }
3137
3138 /* If this is a variadic function, we do not have to restore argument
3139 registers but need to adjust stack pointer back to previous stack
3140 frame location before return. */
3141 if (cfun->machine->va_args_size != 0)
3142 {
3143 /* Generate sp adjustment instruction.
3144 We need to consider padding bytes here. */
3145 sp_adjust = cfun->machine->va_args_size
3146 + cfun->machine->va_args_area_padding_bytes;
3147 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3148 stack_pointer_rtx,
3149 GEN_INT (sp_adjust));
3150 /* Emit rtx into instructions list and receive INSN rtx form. */
3151 sp_adjust_insn = emit_insn (sp_adjust_insn);
3152
3153 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3154 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3155 generate CFI (Call Frame Information) stuff. */
3156 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3157 }
3158
3159 /* Generate return instruction. */
3160 if (!sibcall_p)
3161 emit_jump_insn (gen_return_internal ());
3162 }
3163
3164 /* Function for v3push prologue. */
3165 void
3166 nds32_expand_prologue_v3push (void)
3167 {
3168 int fp_adjust;
3169 int sp_adjust;
3170
3171 rtx Rb, Re;
3172 rtx fp_adjust_insn, sp_adjust_insn;
3173
3174 /* Compute and setup stack frame size.
3175 The result will be in cfun->machine. */
3176 nds32_compute_stack_frame ();
3177
3178 /* If the function is 'naked',
3179 we do not have to generate prologue code fragment. */
3180 if (cfun->machine->naked_p)
3181 return;
3182
3183 /* Get callee_first_regno and callee_last_regno. */
3184 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3185 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3186
3187 /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
3188 where imm8u has to be 8-byte aligned. */
3189 sp_adjust = cfun->machine->local_size
3190 + cfun->machine->out_args_size
3191 + cfun->machine->callee_saved_area_gpr_padding_bytes;
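  /* Illustrative case (hypothetical numbers): local_size 16 + out_args_size 8
     + padding 0 gives sp_adjust = 24, which is 8-byte aligned and should
     satisfy Iu08, so the 'push25 Re,24' form below saves the registers and
     allocates the frame in a single instruction.  */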
3192
3193 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
3194 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
3195 {
3196 /* We can use 'push25 Re,imm8u'. */
3197
3198 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3199 the pattern 'stack_v3push' is implemented in nds32.md.
3200 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3201 nds32_emit_stack_v3push (Rb, Re,
3202 GEN_INT (14), GEN_INT (sp_adjust));
3203
3204 /* Check frame_pointer_needed to see
3205 if we shall emit fp adjustment instruction. */
3206 if (frame_pointer_needed)
3207 {
3208 /* adjust $fp = $sp + 4 ($fp size)
3209 + 4 ($gp size)
3210 + 4 ($lp size)
3211 + (4 * n) (callee-saved registers)
3212 + sp_adjust ('push25 Re,imm8u')
3213 Note: Since we use 'push25 Re,imm8u',
3214 the position of stack pointer is further
3215 changed after push instruction.
3216 Hence, we need to take sp_adjust value
3217 into consideration. */
3218 fp_adjust = cfun->machine->fp_size
3219 + cfun->machine->gp_size
3220 + cfun->machine->lp_size
3221 + cfun->machine->callee_saved_gpr_regs_size
3222 + sp_adjust;
3223 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
3224 stack_pointer_rtx,
3225 GEN_INT (fp_adjust));
3226 /* Emit rtx into instructions list and receive INSN rtx form. */
3227 fp_adjust_insn = emit_insn (fp_adjust_insn);
3228 }
3229 }
3230 else
3231 {
3232 /* We have to use 'push25 Re,0' and
3233 expand one more instruction to adjust $sp later. */
3234
3235 /* nds32_emit_stack_v3push(last_regno, sp_adjust),
3236 the pattern 'stack_v3push' is implemented in nds32.md.
3237 The (const_int 14) means v3push always push { $fp $gp $lp }. */
3238 nds32_emit_stack_v3push (Rb, Re,
3239 GEN_INT (14), GEN_INT (0));
3240
3241 /* Check frame_pointer_needed to see
3242 if we shall emit fp adjustment instruction. */
3243 if (frame_pointer_needed)
3244 {
3245 /* adjust $fp = $sp + 4 ($fp size)
3246 + 4 ($gp size)
3247 + 4 ($lp size)
3248 + (4 * n) (callee-saved registers)
3249 Note: Since we use 'push25 Re,0',
3250 the stack pointer is just at the position
3251 after push instruction.
3252 No need to take sp_adjust into consideration. */
3253 fp_adjust = cfun->machine->fp_size
3254 + cfun->machine->gp_size
3255 + cfun->machine->lp_size
3256 + cfun->machine->callee_saved_gpr_regs_size;
3257 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
3258 stack_pointer_rtx,
3259 GEN_INT (fp_adjust));
3260 /* Emit rtx into instructions list and receive INSN rtx form. */
3261 fp_adjust_insn = emit_insn (fp_adjust_insn);
3262 }
3263
3264 /* Because we use 'push25 Re,0',
3265 we need to expand one more instruction to adjust $sp.
3266 However, the sp_adjust value may be out of range of the addi instruction;
3267 create an alternative add sequence with TA_REGNUM if necessary,
3268 using a NEGATIVE value to indicate that we are decreasing the address. */
3269 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
3270 if (sp_adjust)
3271 {
3272 /* Generate sp adjustment instruction
3273 if and only if sp_adjust != 0. */
3274 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3275 stack_pointer_rtx,
3276 GEN_INT (-1 * sp_adjust));
3277 /* Emit rtx into instructions list and receive INSN rtx form. */
3278 sp_adjust_insn = emit_insn (sp_adjust_insn);
3279
3280 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3281 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3282 generate CFI (Call Frame Information) stuff. */
3283 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3284 }
3285 }
3286
3287 /* Prevent the instruction scheduler from
3288 moving instructions across the boundary. */
3289 emit_insn (gen_blockage ());
3290 }
3291
3292 /* Function for v3pop epilogue. */
3293 void
3294 nds32_expand_epilogue_v3pop (bool sibcall_p)
3295 {
3296 int sp_adjust;
3297
3298 rtx Rb, Re;
3299 rtx sp_adjust_insn;
3300
3301 /* Compute and setup stack frame size.
3302 The result will be in cfun->machine. */
3303 nds32_compute_stack_frame ();
3304
3305 /* Prevent the instruction scheduler from
3306 moving instructions across the boundary. */
3307 emit_insn (gen_blockage ());
3308
3309 /* If the function is 'naked', we do not have to generate any
3310 epilogue code fragment except the 'ret' instruction. */
3311 if (cfun->machine->naked_p)
3312 {
3313 /* Generate return instruction by using 'return_internal' pattern.
3314 Make sure this instruction is after gen_blockage(). */
3315 if (!sibcall_p)
3316 emit_jump_insn (gen_return_internal ());
3317 return;
3318 }
3319
3320 /* Get callee_first_regno and callee_last_regno. */
3321 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
3322 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
3323
3324 /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
3325 where imm8u has to be 8-byte aligned. */
3326 sp_adjust = cfun->machine->local_size
3327 + cfun->machine->out_args_size
3328 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3329
3330 /* We have to consider the alloca issue as well.
3331 If the function calls alloca(), the stack pointer is not fixed.
3332 In that case, we cannot use 'pop25 Re,imm8u' directly.
3333 We have to calculate the stack pointer from the frame pointer
3334 and then use 'pop25 Re,0'.
3335 Of course, frame_pointer_needed should be nonzero
3336 if the function calls alloca(). */
3337 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
3338 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
3339 && !cfun->calls_alloca)
3340 {
3341 /* We can use 'pop25 Re,imm8u'. */
3342
3343 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3344 the pattern 'stack_v3pop' is implemented in nds32.md.
3345 The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3346 nds32_emit_stack_v3pop (Rb, Re,
3347 GEN_INT (14), GEN_INT (sp_adjust));
3348 }
3349 else
3350 {
3351 /* We have to use 'pop25 Re,0', and prior to it,
3352 we must expand one more instruction to adjust $sp. */
3353
3354 if (frame_pointer_needed)
3355 {
3356 /* adjust $sp = $fp - 4 ($fp size)
3357 - 4 ($gp size)
3358 - 4 ($lp size)
3359 - (4 * n) (callee-saved registers)
3360 Note: No need to adjust
3361 cfun->machine->callee_saved_area_padding_bytes,
3362 because we want to adjust stack pointer
3363 to the position for pop instruction. */
3364 sp_adjust = cfun->machine->fp_size
3365 + cfun->machine->gp_size
3366 + cfun->machine->lp_size
3367 + cfun->machine->callee_saved_gpr_regs_size;
3368 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3369 hard_frame_pointer_rtx,
3370 GEN_INT (-1 * sp_adjust));
3371 /* Emit rtx into instructions list and receive INSN rtx form. */
3372 sp_adjust_insn = emit_insn (sp_adjust_insn);
3373 }
3374 else
3375 {
3376 /* If frame pointer is NOT needed,
3377 we cannot calculate the sp adjustment from frame pointer.
3378 Instead, we calculate the adjustment by local_size,
3379 out_args_size, and callee_saved_area_padding_bytes.
3380 Notice that such sp adjustment value may be out of range,
3381 so we have to deal with it as well. */
3382
3383 /* Adjust $sp = $sp + local_size + out_args_size
3384 + callee_saved_area_padding_bytes. */
3385 sp_adjust = cfun->machine->local_size
3386 + cfun->machine->out_args_size
3387 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3388 /* The sp_adjust value may be out of range of the addi instruction;
3389 create an alternative add sequence with TA_REGNUM if necessary,
3390 using a POSITIVE value to indicate that we are increasing the address. */
3391 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
3392 if (sp_adjust)
3393 {
3394 /* Generate sp adjustment instruction
3395 if and only if sp_adjust != 0. */
3396 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3397 stack_pointer_rtx,
3398 GEN_INT (sp_adjust));
3399 /* Emit rtx into instructions list and receive INSN rtx form. */
3400 sp_adjust_insn = emit_insn (sp_adjust_insn);
3401 }
3402 }
3403
3404 /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
3405 the pattern 'stack_v3pop' is implemented in nds32.md. */
3406 /* The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3407 nds32_emit_stack_v3pop (Rb, Re,
3408 GEN_INT (14), GEN_INT (0));
3409 }
3410
3411 /* Generate return instruction. */
3412 emit_jump_insn (gen_pop25return ());
3413 }
3414
3415 /* Return nonzero if this function is known to have a null epilogue.
3416 This allows the optimizer to omit jumps to jumps if no stack
3417 was created. */
3418 int
3419 nds32_can_use_return_insn (void)
3420 {
3421 /* Prior to reloading, we can't tell how many registers must be saved.
3422 Thus we cannot determine whether this function has a null epilogue. */
3423 if (!reload_completed)
3424 return 0;
3425
3426 /* If no stack was created, two conditions must be satisfied:
3427 1. This is a naked function.
3428 So there are no callee-saved registers, local size, or outgoing size.
3429 2. This is NOT a variadic function.
3430 So no argument registers are pushed onto the stack. */
3431 return (cfun->machine->naked_p && (cfun->machine->va_args_size == 0));
3432 }
3433
3434 /* ------------------------------------------------------------------------ */
3435
3436 /* Function to test the 333-form for load/store instructions.
3437 This is an auxiliary extern function for an auxiliary macro in nds32.h.
3438 Because it is a little complicated, we use a function instead of a macro. */
3439 bool
3440 nds32_ls_333_p (rtx rt, rtx ra, rtx imm, machine_mode mode)
3441 {
3442 if (REGNO_REG_CLASS (REGNO (rt)) == LOW_REGS
3443 && REGNO_REG_CLASS (REGNO (ra)) == LOW_REGS)
3444 {
3445 if (GET_MODE_SIZE (mode) == 4)
3446 return satisfies_constraint_Iu05 (imm);
3447
3448 if (GET_MODE_SIZE (mode) == 2)
3449 return satisfies_constraint_Iu04 (imm);
3450
3451 if (GET_MODE_SIZE (mode) == 1)
3452 return satisfies_constraint_Iu03 (imm);
3453 }
3454
3455 return false;
3456 }
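/* Informal note on the checks above: in the 16-bit '333' encodings,
both the data register and the base register live in 3-bit fields,
hence the LOW_REGS requirement, and the offset must satisfy a
mode-dependent unsigned-immediate constraint (Iu05 / Iu04 / Iu03 for
4-, 2- and 1-byte accesses respectively). */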
3457
3458
3459 /* Computing the Length of an Insn.
3460 Return the (possibly adjusted) length for instruction INSN.
3461 LENGTH is the initially computed length of the insn. */
3462 int
3463 nds32_adjust_insn_length (rtx_insn *insn, int length)
3464 {
3465 rtx src, dst;
3466
3467 switch (recog_memoized (insn))
3468 {
3469 case CODE_FOR_move_df:
3470 case CODE_FOR_move_di:
3471 /* Adjust length of movd44 to 2. */
3472 src = XEXP (PATTERN (insn), 1);
3473 dst = XEXP (PATTERN (insn), 0);
3474
3475 if (REG_P (src)
3476 && REG_P (dst)
3477 && (REGNO (src) % 2) == 0
3478 && (REGNO (dst) % 2) == 0)
3479 length = 2;
3480 break;
3481
3482 default:
3483 break;
3484 }
3485
3486 return length;
3487 }
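/* Note (informal): movd44 is the 16-bit double-word register move,
which requires both source and destination to start at an
even-numbered register; when it applies, the insn occupies 2 bytes
instead of the default 4. */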
3488
3489
3490 /* Return alignment 2 (log base 2) if the next instruction after LABEL is 4 bytes long. */
3491 int
3492 nds32_target_alignment (rtx label)
3493 {
3494 rtx_insn *insn;
3495
3496 if (optimize_size)
3497 return 0;
3498
3499 insn = next_active_insn (label);
3500
3501 if (insn == 0)
3502 return 0;
3503 else if ((get_attr_length (insn) % 4) == 0)
3504 return 2;
3505 else
3506 return 0;
3507 }
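/* In other words: unless optimizing for size, a label whose first
active insn has a length that is a multiple of 4 bytes is given
2^2 = 4 byte alignment; labels followed by 16-bit insns get none. */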
3508
3509 /* ------------------------------------------------------------------------ */
3510
3511 /* PART 5: Initialize target hook structure and definitions. */
3512 \f
3513 /* Controlling the Compilation Driver. */
3514
3515 \f
3516 /* Run-time Target Specification. */
3517
3518 \f
3519 /* Defining Data Structures for Per-function Information. */
3520
3521 \f
3522 /* Storage Layout. */
3523
3524 #undef TARGET_PROMOTE_FUNCTION_MODE
3525 #define TARGET_PROMOTE_FUNCTION_MODE \
3526 default_promote_function_mode_always_promote
3527
3528 \f
3529 /* Layout of Source Language Data Types. */
3530
3531 \f
3532 /* Register Usage. */
3533
3534 /* -- Basic Characteristics of Registers. */
3535
3536 /* -- Order of Allocation of Registers. */
3537
3538 /* -- How Values Fit in Registers. */
3539
3540 /* -- Handling Leaf Functions. */
3541
3542 /* -- Registers That Form a Stack. */
3543
3544 \f
3545 /* Register Classes. */
3546
3547 #undef TARGET_CLASS_MAX_NREGS
3548 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
3549
3550 #undef TARGET_LRA_P
3551 #define TARGET_LRA_P hook_bool_void_true
3552
3553 #undef TARGET_REGISTER_PRIORITY
3554 #define TARGET_REGISTER_PRIORITY nds32_register_priority
3555
3556 \f
3557 /* Obsolete Macros for Defining Constraints. */
3558
3559 \f
3560 /* Stack Layout and Calling Conventions. */
3561
3562 /* -- Basic Stack Layout. */
3563
3564 /* -- Exception Handling Support. */
3565
3566 /* -- Specifying How Stack Checking is Done. */
3567
3568 /* -- Registers That Address the Stack Frame. */
3569
3570 /* -- Eliminating Frame Pointer and Arg Pointer. */
3571
3572 #undef TARGET_CAN_ELIMINATE
3573 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
3574
3575 /* -- Passing Function Arguments on the Stack. */
3576
3577 /* -- Passing Arguments in Registers. */
3578
3579 #undef TARGET_FUNCTION_ARG
3580 #define TARGET_FUNCTION_ARG nds32_function_arg
3581
3582 #undef TARGET_MUST_PASS_IN_STACK
3583 #define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack
3584
3585 #undef TARGET_ARG_PARTIAL_BYTES
3586 #define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes
3587
3588 #undef TARGET_FUNCTION_ARG_ADVANCE
3589 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
3590
3591 #undef TARGET_FUNCTION_ARG_BOUNDARY
3592 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
3593
3594 /* -- How Scalar Function Values Are Returned. */
3595
3596 #undef TARGET_FUNCTION_VALUE
3597 #define TARGET_FUNCTION_VALUE nds32_function_value
3598
3599 #undef TARGET_LIBCALL_VALUE
3600 #define TARGET_LIBCALL_VALUE nds32_libcall_value
3601
3602 #undef TARGET_FUNCTION_VALUE_REGNO_P
3603 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
3604
3605 /* -- How Large Values Are Returned. */
3606
3607 /* -- Caller-Saves Register Allocation. */
3608
3609 /* -- Function Entry and Exit. */
3610
3611 #undef TARGET_ASM_FUNCTION_PROLOGUE
3612 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
3613
3614 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
3615 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
3616
3617 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
3618 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
3619
3620 #undef TARGET_ASM_FUNCTION_EPILOGUE
3621 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
3622
3623 #undef TARGET_ASM_OUTPUT_MI_THUNK
3624 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
3625
3626 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3627 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
3628
3629 /* -- Generating Code for Profiling. */
3630
3631 /* -- Permitting tail calls. */
3632
3633 #undef TARGET_WARN_FUNC_RETURN
3634 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
3635
3636 /* Stack smashing protection. */
3637
3638 \f
3639 /* Implementing the Varargs Macros. */
3640
3641 #undef TARGET_SETUP_INCOMING_VARARGS
3642 #define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs
3643
3644 #undef TARGET_STRICT_ARGUMENT_NAMING
3645 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
3646
3647 \f
3648 /* Trampolines for Nested Functions. */
3649
3650 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
3651 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
3652
3653 #undef TARGET_TRAMPOLINE_INIT
3654 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
3655
3656 \f
3657 /* Implicit Calls to Library Routines. */
3658
3659 \f
3660 /* Addressing Modes. */
3661
3662 #undef TARGET_LEGITIMATE_ADDRESS_P
3663 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
3664
3665 \f
3666 /* Anchored Addresses. */
3667
3668 \f
3669 /* Condition Code Status. */
3670
3671 /* -- Representation of condition codes using (cc0). */
3672
3673 /* -- Representation of condition codes using registers. */
3674
3675 /* -- Macros to control conditional execution. */
3676
3677 \f
3678 /* Describing Relative Costs of Operations. */
3679
3680 #undef TARGET_REGISTER_MOVE_COST
3681 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
3682
3683 #undef TARGET_MEMORY_MOVE_COST
3684 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
3685
3686 #undef TARGET_RTX_COSTS
3687 #define TARGET_RTX_COSTS nds32_rtx_costs
3688
3689 #undef TARGET_ADDRESS_COST
3690 #define TARGET_ADDRESS_COST nds32_address_cost
3691
3692 \f
3693 /* Adjusting the Instruction Scheduler. */
3694
3695 \f
3696 /* Dividing the Output into Sections (Texts, Data, . . . ). */
3697
3698 #undef TARGET_ENCODE_SECTION_INFO
3699 #define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info
3700
3701 \f
3702 /* Position Independent Code. */
3703
3704 \f
3705 /* Defining the Output Assembler Language. */
3706
3707 /* -- The Overall Framework of an Assembler File. */
3708
3709 #undef TARGET_ASM_FILE_START
3710 #define TARGET_ASM_FILE_START nds32_asm_file_start
3711 #undef TARGET_ASM_FILE_END
3712 #define TARGET_ASM_FILE_END nds32_asm_file_end
3713
3714 /* -- Output of Data. */
3715
3716 #undef TARGET_ASM_ALIGNED_HI_OP
3717 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3718
3719 #undef TARGET_ASM_ALIGNED_SI_OP
3720 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
3721
3722 /* -- Output of Uninitialized Variables. */
3723
3724 /* -- Output and Generation of Labels. */
3725
3726 #undef TARGET_ASM_GLOBALIZE_LABEL
3727 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
3728
3729 /* -- How Initialization Functions Are Handled. */
3730
3731 /* -- Macros Controlling Initialization Routines. */
3732
3733 /* -- Output of Assembler Instructions. */
3734
3735 #undef TARGET_PRINT_OPERAND
3736 #define TARGET_PRINT_OPERAND nds32_print_operand
3737 #undef TARGET_PRINT_OPERAND_ADDRESS
3738 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
3739
3740 /* -- Output of Dispatch Tables. */
3741
3742 /* -- Assembler Commands for Exception Regions. */
3743
3744 /* -- Assembler Commands for Alignment. */
3745
3746 \f
3747 /* Controlling Debugging Information Format. */
3748
3749 /* -- Macros Affecting All Debugging Formats. */
3750
3751 /* -- Specific Options for DBX Output. */
3752
3753 /* -- Open-Ended Hooks for DBX Format. */
3754
3755 /* -- File Names in DBX Format. */
3756
3757 /* -- Macros for SDB and DWARF Output. */
3758
3759 /* -- Macros for VMS Debug Format. */
3760
3761 \f
3762 /* Cross Compilation and Floating Point. */
3763
3764 \f
3765 /* Mode Switching Instructions. */
3766
3767 \f
3768 /* Defining target-specific uses of __attribute__. */
3769
3770 #undef TARGET_ATTRIBUTE_TABLE
3771 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
3772
3773 #undef TARGET_MERGE_DECL_ATTRIBUTES
3774 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
3775
3776 #undef TARGET_INSERT_ATTRIBUTES
3777 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
3778
3779 #undef TARGET_OPTION_PRAGMA_PARSE
3780 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
3781
3782 #undef TARGET_OPTION_OVERRIDE
3783 #define TARGET_OPTION_OVERRIDE nds32_option_override
3784
3785 \f
3786 /* Emulating TLS. */
3787
3788 \f
3789 /* Defining coprocessor specifics for MIPS targets. */
3790
3791 \f
3792 /* Parameters for Precompiled Header Validity Checking. */
3793
3794 \f
3795 /* C++ ABI parameters. */
3796
3797 \f
3798 /* Adding support for named address spaces. */
3799
3800 \f
3801 /* Miscellaneous Parameters. */
3802
3803 #undef TARGET_INIT_BUILTINS
3804 #define TARGET_INIT_BUILTINS nds32_init_builtins
3805
3806 #undef TARGET_EXPAND_BUILTIN
3807 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
3808
3809 \f
3810 /* ------------------------------------------------------------------------ */
3811
3812 /* Initialize the GCC target structure. */
3813
3814 struct gcc_target targetm = TARGET_INITIALIZER;
3815
3816 /* ------------------------------------------------------------------------ */