]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/nds32/nds32.c
2015-06-17 Andrew MacLeod <amacleod@redhat.com>
[thirdparty/gcc.git] / gcc / config / nds32 / nds32.c
1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* ------------------------------------------------------------------------ */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "tree.h"
30 #include "stor-layout.h"
31 #include "varasm.h"
32 #include "calls.h"
33 #include "rtl.h"
34 #include "regs.h"
35 #include "hard-reg-set.h"
36 #include "insn-config.h" /* Required by recog.h. */
37 #include "conditions.h"
38 #include "output.h"
39 #include "insn-attr.h" /* For DFA state_t. */
40 #include "insn-codes.h" /* For CODE_FOR_xxx. */
41 #include "reload.h" /* For push_reload(). */
42 #include "flags.h"
43 #include "function.h"
44 #include "insn-config.h"
45 #include "expmed.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "emit-rtl.h"
49 #include "stmt.h"
50 #include "expr.h"
51 #include "recog.h"
52 #include "diagnostic-core.h"
53 #include "dominance.h"
54 #include "cfg.h"
55 #include "cfgrtl.h"
56 #include "cfganal.h"
57 #include "lcm.h"
58 #include "cfgbuild.h"
59 #include "cfgcleanup.h"
60 #include "predict.h"
61 #include "basic-block.h"
62 #include "df.h"
63 #include "tm_p.h"
64 #include "tm-constrs.h"
65 #include "optabs.h" /* For GEN_FCN. */
66 #include "target.h"
67 #include "target-def.h"
68 #include "langhooks.h" /* For add_builtin_function(). */
69 #include "builtins.h"
70
71 /* ------------------------------------------------------------------------ */
72
73 /* This file is divided into five parts:
74
75 PART 1: Auxiliary static variable definitions and
76 target hook static variable definitions.
77
78 PART 2: Auxiliary static function definitions.
79
80 PART 3: Implement target hook stuff definitions.
81
   PART 4: Implement extern function definitions,
83 the prototype is in nds32-protos.h.
84
85 PART 5: Initialize target hook structure and definitions. */
86
87 /* ------------------------------------------------------------------------ */
88
89 /* PART 1: Auxiliary static variable definitions and
90 target hook static variable definitions. */
91
/* Define intrinsic register names.
   Please refer to the nds32_intrinsic.h file; the index corresponds to
   'enum nds32_intrinsic_registers' data type values.
   NOTE that the enum base value starts from 1024.  */
static const char * const nds32_intrinsic_register_names[] =
{
  "$PSW", "$IPSW", "$ITYPE", "$IPC"
};
100
/* Defining target-specific uses of __attribute__.
   Each row follows the attribute_spec layout documented below; all
   handlers are NULL, so argument validation happens elsewhere
   (e.g. in the isr handling code), not at attribute-parse time.  */
static const struct attribute_spec nds32_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
               function_type_required, handler, affects_type_identity } */

  /* The interrupt vid: [0-63]+ (actual vector number starts from 9 to 72).  */
  { "interrupt", 1, 64, false, false, false, NULL, false },
  /* The exception vid: [1-8]+ (actual vector number starts from 1 to 8).  */
  { "exception", 1, 8, false, false, false, NULL, false },
  /* Argument is user's interrupt numbers.  The vector number is always 0.  */
  { "reset", 1, 1, false, false, false, NULL, false },

  /* The attributes describing isr nested type.  */
  { "nested", 0, 0, false, false, false, NULL, false },
  { "not_nested", 0, 0, false, false, false, NULL, false },
  { "nested_ready", 0, 0, false, false, false, NULL, false },

  /* The attributes describing isr register save scheme.  */
  { "save_all", 0, 0, false, false, false, NULL, false },
  { "partial_save", 0, 0, false, false, false, NULL, false },

  /* The attributes used by reset attribute.  */
  { "nmi", 1, 1, false, false, false, NULL, false },
  { "warm", 1, 1, false, false, false, NULL, false },

  /* The attribute telling no prologue/epilogue.  */
  { "naked", 0, 0, false, false, false, NULL, false },

  /* The last attribute spec is set to be NULL (terminator sentinel).  */
  { NULL, 0, 0, false, false, false, NULL, false }
};
133
134
135 /* ------------------------------------------------------------------------ */
136
137 /* PART 2: Auxiliary static function definitions. */
138
139 /* Function to save and restore machine-specific function data. */
140 static struct machine_function *
141 nds32_init_machine_status (void)
142 {
143 struct machine_function *machine;
144 machine = ggc_cleared_alloc<machine_function> ();
145
146 /* Initially assume this function needs prologue/epilogue. */
147 machine->naked_p = 0;
148
149 /* Initially assume this function does NOT use fp_as_gp optimization. */
150 machine->fp_as_gp_p = 0;
151
152 return machine;
153 }
154
/* Function to compute stack frame size and
   store the results into the cfun->machine structure.

   Fills in: va_args_*, local_size, out_args_size, fp/gp/lp_size,
   callee_saved_{first,last}_gpr_regno, callee_saved_gpr_regs_size,
   the padding-byte fields, and naked_p.  May also set
   current_function_static_stack_size when -fstack-usage is given.  */
static void
nds32_compute_stack_frame (void)
{
  int r;
  int block_size;

  /* Because nds32_compute_stack_frame() will be called from different
     places, every time we enter this function, we have to assume this
     function needs prologue/epilogue.  */
  cfun->machine->naked_p = 0;

  /* Get variadic arguments size to prepare pretend arguments and
     we will push them into stack at prologue by ourselves.  */
  cfun->machine->va_args_size = crtl->args.pretend_args_size;
  if (cfun->machine->va_args_size != 0)
    {
      cfun->machine->va_args_first_regno
        = NDS32_GPR_ARG_FIRST_REGNUM
          + NDS32_MAX_GPR_REGS_FOR_ARGS
          - (crtl->args.pretend_args_size / UNITS_PER_WORD);
      cfun->machine->va_args_last_regno
        = NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS - 1;
    }
  else
    {
      /* SP_REGNUM here is a sentinel meaning "no varargs registers".  */
      cfun->machine->va_args_first_regno = SP_REGNUM;
      cfun->machine->va_args_last_regno = SP_REGNUM;
    }

  /* Important: We need to make sure that varargs area is 8-byte alignment.
     NOTE(review): va_args_area_padding_bytes is only written when padding
     is required; this relies on machine_function being zero-initialized
     and pretend_args_size being stable across recomputations — confirm.  */
  block_size = cfun->machine->va_args_size;
  if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
    {
      cfun->machine->va_args_area_padding_bytes
        = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
    }

  /* Get local variables, incoming variables, and temporary variables size.
     Note that we need to make sure it is 8-byte alignment because
     there may be no padding bytes if we are using LRA.  */
  cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());

  /* Get outgoing arguments size.  */
  cfun->machine->out_args_size = crtl->outgoing_args_size;

  /* If $fp value is required to be saved on stack, it needs 4 bytes space.
     Check whether $fp is ever live.  */
  cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;

  /* If $gp value is required to be saved on stack, it needs 4 bytes space.
     Check whether we are using PIC code generation.  */
  cfun->machine->gp_size = (flag_pic) ? 4 : 0;

  /* If $lp value is required to be saved on stack, it needs 4 bytes space.
     Check whether $lp is ever live.  */
  cfun->machine->lp_size = (df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;

  /* Initially there is no padding bytes.  */
  cfun->machine->callee_saved_area_gpr_padding_bytes = 0;

  /* Calculate the bytes of saving callee-saved registers on stack.  */
  cfun->machine->callee_saved_gpr_regs_size = 0;
  cfun->machine->callee_saved_first_gpr_regno = SP_REGNUM;
  cfun->machine->callee_saved_last_gpr_regno = SP_REGNUM;
  /* Currently, there is no need to check $r28~$r31
     because we will save them in another way.  */
  for (r = 0; r < 28; r++)
    {
      if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
        {
          /* Mark the first required callee-saved register
             (only need to set it once).
             If first regno == SP_REGNUM, we can tell that
             it is the first time to be here.  */
          if (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM)
            cfun->machine->callee_saved_first_gpr_regno = r;
          /* Mark the last required callee-saved register.  */
          cfun->machine->callee_saved_last_gpr_regno = r;
        }
    }

  /* Check if this function can omit prologue/epilogue code fragment.
     If there is 'naked' attribute in this function,
     we can set 'naked_p' flag to indicate that
     we do not have to generate prologue/epilogue.
     Or, if all the following conditions succeed,
     we can set this function 'naked_p' as well:
       condition 1: first_regno == last_regno == SP_REGNUM,
                    which means we do not have to save
                    any callee-saved registers.
       condition 2: Both $lp and $fp are NOT live in this function,
                    which means we do not need to save them and there
                    is no outgoing size.
       condition 3: There is no local_size, which means
                    we do not need to adjust $sp.  */
  if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
      || (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM
          && cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM
          && !df_regs_ever_live_p (FP_REGNUM)
          && !df_regs_ever_live_p (LP_REGNUM)
          && cfun->machine->local_size == 0))
    {
      /* Set this function 'naked_p' and other functions can check this flag.
         Note that in nds32 port, the 'naked_p = 1' JUST means there is no
         callee-saved, local size, and outgoing size.
         The varargs space and ret instruction may still present in
         the prologue/epilogue expanding.  */
      cfun->machine->naked_p = 1;

      /* No need to save $fp, $gp, and $lp.
         We should set these values to be zero
         so that nds32_initial_elimination_offset() can work properly.  */
      cfun->machine->fp_size = 0;
      cfun->machine->gp_size = 0;
      cfun->machine->lp_size = 0;

      /* If stack usage computation is required,
         we need to provide the static stack size.  */
      if (flag_stack_usage_info)
        current_function_static_stack_size = 0;

      /* No need to do following adjustment, return immediately.  */
      return;
    }

  /* Adjustment for v3push instructions:
     If we are using v3push (push25/pop25) instructions,
     we need to make sure Rb is $r6 and Re is
     located on $r6, $r8, $r10, or $r14.
     Some results above will be discarded and recomputed.
     Note that it is only available under V3/V3M ISA and we
     DO NOT setup following stuff for isr or variadic function.  */
  if (TARGET_V3PUSH
      && !nds32_isr_function_p (current_function_decl)
      && (cfun->machine->va_args_size == 0))
    {
      /* Recompute:
           cfun->machine->fp_size
           cfun->machine->gp_size
           cfun->machine->lp_size
           cfun->machine->callee_saved_first_gpr_regno
           cfun->machine->callee_saved_last_gpr_regno */

      /* For v3push instructions, $fp, $gp, and $lp are always saved.  */
      cfun->machine->fp_size = 4;
      cfun->machine->gp_size = 4;
      cfun->machine->lp_size = 4;

      /* Remember to set Rb = $r6.  */
      cfun->machine->callee_saved_first_gpr_regno = 6;

      /* Round the last saved regno up to the nearest Re the v3push
         encoding can express ($r6, $r8, $r10, or $r14).  */
      if (cfun->machine->callee_saved_last_gpr_regno <= 6)
        {
          /* Re = $r6 */
          cfun->machine->callee_saved_last_gpr_regno = 6;
        }
      else if (cfun->machine->callee_saved_last_gpr_regno <= 8)
        {
          /* Re = $r8 */
          cfun->machine->callee_saved_last_gpr_regno = 8;
        }
      else if (cfun->machine->callee_saved_last_gpr_regno <= 10)
        {
          /* Re = $r10 */
          cfun->machine->callee_saved_last_gpr_regno = 10;
        }
      else if (cfun->machine->callee_saved_last_gpr_regno <= 14)
        {
          /* Re = $r14 */
          cfun->machine->callee_saved_last_gpr_regno = 14;
        }
      else if (cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM)
        {
          /* If last_regno is SP_REGNUM, which means
             it is never changed, so set it to Re = $r6.  */
          cfun->machine->callee_saved_last_gpr_regno = 6;
        }
      else
        {
          /* The program flow should not go here.  */
          gcc_unreachable ();
        }
    }

  /* We have correctly set callee_saved_first_gpr_regno
     and callee_saved_last_gpr_regno.
     Initially, the callee_saved_gpr_regs_size is supposed to be 0.
     As long as callee_saved_last_gpr_regno is not SP_REGNUM,
     we can update callee_saved_gpr_regs_size with new size.  */
  if (cfun->machine->callee_saved_last_gpr_regno != SP_REGNUM)
    {
      /* Compute pushed size of callee-saved registers.  */
      cfun->machine->callee_saved_gpr_regs_size
        = 4 * (cfun->machine->callee_saved_last_gpr_regno
               - cfun->machine->callee_saved_first_gpr_regno
               + 1);
    }

  /* Important: We need to make sure that
     (fp_size + gp_size + lp_size + callee_saved_gpr_regs_size)
     is 8-byte alignment.
     If it is not, calculate the padding bytes.  */
  block_size = cfun->machine->fp_size
               + cfun->machine->gp_size
               + cfun->machine->lp_size
               + cfun->machine->callee_saved_gpr_regs_size;
  if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
    {
      cfun->machine->callee_saved_area_gpr_padding_bytes
        = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
    }

  /* If stack usage computation is required,
     we need to provide the static stack size.  */
  if (flag_stack_usage_info)
    {
      current_function_static_stack_size
        = NDS32_ROUND_UP_DOUBLE_WORD (block_size)
          + cfun->machine->local_size
          + cfun->machine->out_args_size;
    }
}
379
380 /* Function to create a parallel rtx pattern
381 which presents stack push multiple behavior.
382 The overall concept are:
383 "push registers to memory",
384 "adjust stack pointer". */
385 static void
386 nds32_emit_stack_push_multiple (rtx Rb, rtx Re, rtx En4, bool vaarg_p)
387 {
388 int regno;
389 int extra_count;
390 int num_use_regs;
391 int par_index;
392 int offset;
393 int save_fp, save_gp, save_lp;
394
395 rtx reg;
396 rtx mem;
397 rtx push_rtx;
398 rtx adjust_sp_rtx;
399 rtx parallel_insn;
400 rtx dwarf;
401
402 /* We need to provide a customized rtx which contains
403 necessary information for data analysis,
404 so we create a parallel rtx like this:
405 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
406 (reg:SI Rb))
407 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
408 (reg:SI Rb+1))
409 ...
410 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
411 (reg:SI Re))
412 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
413 (reg:SI FP_REGNUM))
414 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
415 (reg:SI GP_REGNUM))
416 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
417 (reg:SI LP_REGNUM))
418 (set (reg:SI SP_REGNUM)
419 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
420
421 /* Determine whether we need to save $fp, $gp, or $lp. */
422 save_fp = INTVAL (En4) & 0x8;
423 save_gp = INTVAL (En4) & 0x4;
424 save_lp = INTVAL (En4) & 0x2;
425
426 /* Calculate the number of registers that will be pushed. */
427 extra_count = 0;
428 if (save_fp)
429 extra_count++;
430 if (save_gp)
431 extra_count++;
432 if (save_lp)
433 extra_count++;
434 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
435 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
436 num_use_regs = extra_count;
437 else
438 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
439
440 /* In addition to used registers,
441 we need one more space for (set sp sp-x) rtx. */
442 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
443 rtvec_alloc (num_use_regs + 1));
444 par_index = 0;
445
446 /* Initialize offset and start to create push behavior. */
447 offset = -(num_use_regs * 4);
448
449 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
450 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
451 {
452 /* Rb and Re may be SP_REGNUM.
453 We need to break this loop immediately. */
454 if (regno == SP_REGNUM)
455 break;
456
457 reg = gen_rtx_REG (SImode, regno);
458 mem = gen_frame_mem (SImode, plus_constant (Pmode,
459 stack_pointer_rtx,
460 offset));
461 push_rtx = gen_rtx_SET (mem, reg);
462 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
463 RTX_FRAME_RELATED_P (push_rtx) = 1;
464 offset = offset + 4;
465 par_index++;
466 }
467
468 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
469 if (save_fp)
470 {
471 reg = gen_rtx_REG (SImode, FP_REGNUM);
472 mem = gen_frame_mem (SImode, plus_constant (Pmode,
473 stack_pointer_rtx,
474 offset));
475 push_rtx = gen_rtx_SET (mem, reg);
476 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
477 RTX_FRAME_RELATED_P (push_rtx) = 1;
478 offset = offset + 4;
479 par_index++;
480 }
481 if (save_gp)
482 {
483 reg = gen_rtx_REG (SImode, GP_REGNUM);
484 mem = gen_frame_mem (SImode, plus_constant (Pmode,
485 stack_pointer_rtx,
486 offset));
487 push_rtx = gen_rtx_SET (mem, reg);
488 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
489 RTX_FRAME_RELATED_P (push_rtx) = 1;
490 offset = offset + 4;
491 par_index++;
492 }
493 if (save_lp)
494 {
495 reg = gen_rtx_REG (SImode, LP_REGNUM);
496 mem = gen_frame_mem (SImode, plus_constant (Pmode,
497 stack_pointer_rtx,
498 offset));
499 push_rtx = gen_rtx_SET (mem, reg);
500 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
501 RTX_FRAME_RELATED_P (push_rtx) = 1;
502 offset = offset + 4;
503 par_index++;
504 }
505
506 /* Create (set sp sp-x). */
507
508 /* We need to re-calculate the offset value again for adjustment. */
509 offset = -(num_use_regs * 4);
510 adjust_sp_rtx
511 = gen_rtx_SET (stack_pointer_rtx,
512 plus_constant (Pmode, stack_pointer_rtx, offset));
513 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
514 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
515
516 parallel_insn = emit_insn (parallel_insn);
517
518 /* The insn rtx 'parallel_insn' will change frame layout.
519 We need to use RTX_FRAME_RELATED_P so that GCC is able to
520 generate CFI (Call Frame Information) stuff. */
521 RTX_FRAME_RELATED_P (parallel_insn) = 1;
522
523 /* Don't use GCC's logic for CFI info if we are generate a push for VAARG
524 since we will not restore those register at epilogue. */
525 if (vaarg_p)
526 {
527 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA,
528 copy_rtx (adjust_sp_rtx), NULL_RTX);
529 REG_NOTES (parallel_insn) = dwarf;
530 }
531 }
532
533 /* Function to create a parallel rtx pattern
534 which presents stack pop multiple behavior.
535 The overall concept are:
536 "pop registers from memory",
537 "adjust stack pointer". */
538 static void
539 nds32_emit_stack_pop_multiple (rtx Rb, rtx Re, rtx En4)
540 {
541 int regno;
542 int extra_count;
543 int num_use_regs;
544 int par_index;
545 int offset;
546 int save_fp, save_gp, save_lp;
547
548 rtx reg;
549 rtx mem;
550 rtx pop_rtx;
551 rtx adjust_sp_rtx;
552 rtx parallel_insn;
553 rtx dwarf = NULL_RTX;
554
555 /* We need to provide a customized rtx which contains
556 necessary information for data analysis,
557 so we create a parallel rtx like this:
558 (parallel [(set (reg:SI Rb)
559 (mem (reg:SI SP_REGNUM)))
560 (set (reg:SI Rb+1)
561 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
562 ...
563 (set (reg:SI Re)
564 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
565 (set (reg:SI FP_REGNUM)
566 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
567 (set (reg:SI GP_REGNUM)
568 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
569 (set (reg:SI LP_REGNUM)
570 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
571 (set (reg:SI SP_REGNUM)
572 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
573
574 /* Determine whether we need to restore $fp, $gp, or $lp. */
575 save_fp = INTVAL (En4) & 0x8;
576 save_gp = INTVAL (En4) & 0x4;
577 save_lp = INTVAL (En4) & 0x2;
578
579 /* Calculate the number of registers that will be poped. */
580 extra_count = 0;
581 if (save_fp)
582 extra_count++;
583 if (save_gp)
584 extra_count++;
585 if (save_lp)
586 extra_count++;
587 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
588 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
589 num_use_regs = extra_count;
590 else
591 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
592
593 /* In addition to used registers,
594 we need one more space for (set sp sp+x) rtx. */
595 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
596 rtvec_alloc (num_use_regs + 1));
597 par_index = 0;
598
599 /* Initialize offset and start to create pop behavior. */
600 offset = 0;
601
602 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
603 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
604 {
605 /* Rb and Re may be SP_REGNUM.
606 We need to break this loop immediately. */
607 if (regno == SP_REGNUM)
608 break;
609
610 reg = gen_rtx_REG (SImode, regno);
611 mem = gen_frame_mem (SImode, plus_constant (Pmode,
612 stack_pointer_rtx,
613 offset));
614 pop_rtx = gen_rtx_SET (reg, mem);
615 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
616 RTX_FRAME_RELATED_P (pop_rtx) = 1;
617 offset = offset + 4;
618 par_index++;
619
620 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
621 }
622
623 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
624 if (save_fp)
625 {
626 reg = gen_rtx_REG (SImode, FP_REGNUM);
627 mem = gen_frame_mem (SImode, plus_constant (Pmode,
628 stack_pointer_rtx,
629 offset));
630 pop_rtx = gen_rtx_SET (reg, mem);
631 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
632 RTX_FRAME_RELATED_P (pop_rtx) = 1;
633 offset = offset + 4;
634 par_index++;
635
636 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
637 }
638 if (save_gp)
639 {
640 reg = gen_rtx_REG (SImode, GP_REGNUM);
641 mem = gen_frame_mem (SImode, plus_constant (Pmode,
642 stack_pointer_rtx,
643 offset));
644 pop_rtx = gen_rtx_SET (reg, mem);
645 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
646 RTX_FRAME_RELATED_P (pop_rtx) = 1;
647 offset = offset + 4;
648 par_index++;
649
650 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
651 }
652 if (save_lp)
653 {
654 reg = gen_rtx_REG (SImode, LP_REGNUM);
655 mem = gen_frame_mem (SImode, plus_constant (Pmode,
656 stack_pointer_rtx,
657 offset));
658 pop_rtx = gen_rtx_SET (reg, mem);
659 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
660 RTX_FRAME_RELATED_P (pop_rtx) = 1;
661 offset = offset + 4;
662 par_index++;
663
664 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
665 }
666
667 /* Create (set sp sp+x). */
668
669 /* The offset value is already in place. No need to re-calculate it. */
670 adjust_sp_rtx
671 = gen_rtx_SET (stack_pointer_rtx,
672 plus_constant (Pmode, stack_pointer_rtx, offset));
673 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
674
675 /* Tell gcc we adjust SP in this insn. */
676 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
677
678 parallel_insn = emit_insn (parallel_insn);
679
680 /* The insn rtx 'parallel_insn' will change frame layout.
681 We need to use RTX_FRAME_RELATED_P so that GCC is able to
682 generate CFI (Call Frame Information) stuff. */
683 RTX_FRAME_RELATED_P (parallel_insn) = 1;
684
685 /* Add CFI info by manual. */
686 REG_NOTES (parallel_insn) = dwarf;
687 }
688
/* Function to create a parallel rtx pattern
   which presents stack v3push behavior.
   The overall concept are:
     "push registers to memory",
     "adjust stack pointer".
   Rb must be $r6 and Re must be $r6, $r8, $r10, or $r14
   (v3push encoding constraint); $fp, $gp, $lp are always pushed.
   IMM8U is an extra stack decrement folded into the same insn.
   En4 is unused because the saved set is fixed for v3push.  */
static void
nds32_emit_stack_v3push (rtx Rb,
                         rtx Re,
                         rtx En4 ATTRIBUTE_UNUSED,
                         rtx imm8u)
{
  int regno;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx push_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
                     (reg:SI Rb))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
                     (reg:SI Rb+1))
                ...
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
                     (reg:SI Re))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
                     (reg:SI FP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
                     (reg:SI GP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
                     (reg:SI LP_REGNUM))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))])  */

  /* Calculate the number of registers that will be pushed.
     Since $fp, $gp, and $lp are always pushed with the v3push instruction,
     we need to count these three registers.
     Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;

  /* In addition to used registers,
     we need one more space for (set sp sp-x-imm8u) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
                                    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create push behavior.  */
  offset = -(num_use_regs * 4);

  /* Create (set mem regX) from Rb, Rb+1 up to Re.
     Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                                  stack_pointer_rtx,
                                                  offset));
      push_rtx = gen_rtx_SET (mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set mem fp).  */
  reg = gen_rtx_REG (SImode, FP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  push_rtx = gen_rtx_SET (mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set mem gp).  */
  reg = gen_rtx_REG (SImode, GP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  push_rtx = gen_rtx_SET (mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set mem lp).  */
  reg = gen_rtx_REG (SImode, LP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  push_rtx = gen_rtx_SET (mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;

  /* Create (set sp sp-x-imm8u).  */

  /* We need to re-calculate the offset value again for adjustment.  */
  offset = -(num_use_regs * 4);
  adjust_sp_rtx
    = gen_rtx_SET (stack_pointer_rtx,
                   plus_constant (Pmode,
                                  stack_pointer_rtx,
                                  offset - INTVAL (imm8u)));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
  RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;

  parallel_insn = emit_insn (parallel_insn);

  /* The insn rtx 'parallel_insn' will change frame layout.
     We need to use RTX_FRAME_RELATED_P so that GCC is able to
     generate CFI (Call Frame Information) stuff.  */
  RTX_FRAME_RELATED_P (parallel_insn) = 1;
}
812
/* Function to create a parallel rtx pattern
   which presents stack v3pop behavior.
   The overall concept are:
     "pop registers from memory",
     "adjust stack pointer".
   Rb must be $r6 and Re must be $r6, $r8, $r10, or $r14
   (v3pop encoding constraint); $fp, $gp, $lp are always popped.
   IMM8U is an extra stack increment folded into the same insn.
   En4 is unused because the restored set is fixed for v3pop.
   CFI is attached manually via REG_CFA_* notes.  */
static void
nds32_emit_stack_v3pop (rtx Rb,
                        rtx Re,
                        rtx En4 ATTRIBUTE_UNUSED,
                        rtx imm8u)
{
  int regno;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx pop_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;
  rtx dwarf = NULL_RTX;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [(set (reg:SI Rb)
                     (mem (reg:SI SP_REGNUM)))
                (set (reg:SI Rb+1)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
                ...
                (set (reg:SI Re)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
                (set (reg:SI FP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
                (set (reg:SI GP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
                (set (reg:SI LP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))])  */

  /* Calculate the number of registers that will be popped.
     Since $fp, $gp, and $lp are always popped with the v3pop instruction,
     we need to count these three registers.
     Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;

  /* In addition to used registers,
     we need one more space for (set sp sp+x+imm8u) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
                                    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create pop behavior.  */
  offset = 0;

  /* Create (set regX mem) from Rb, Rb+1 up to Re.
     Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                                  stack_pointer_rtx,
                                                  offset));
      pop_rtx = gen_rtx_SET (reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;

      dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
    }

  /* Create (set fp mem).  */
  reg = gen_rtx_REG (SImode, FP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  pop_rtx = gen_rtx_SET (reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;
  dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);

  /* Create (set gp mem).  */
  reg = gen_rtx_REG (SImode, GP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  pop_rtx = gen_rtx_SET (reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;
  dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);

  /* Create (set lp mem).  */
  reg = gen_rtx_REG (SImode, LP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
                                              stack_pointer_rtx,
                                              offset));
  pop_rtx = gen_rtx_SET (reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;
  dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);

  /* Create (set sp sp+x+imm8u).  */

  /* The offset value is already in place.  No need to re-calculate it.  */
  adjust_sp_rtx
    = gen_rtx_SET (stack_pointer_rtx,
                   plus_constant (Pmode,
                                  stack_pointer_rtx,
                                  offset + INTVAL (imm8u)));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;

  /* Tell gcc we adjust SP in this insn.  */
  dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);

  parallel_insn = emit_insn (parallel_insn);

  /* The insn rtx 'parallel_insn' will change frame layout.
     We need to use RTX_FRAME_RELATED_P so that GCC is able to
     generate CFI (Call Frame Information) stuff.  */
  RTX_FRAME_RELATED_P (parallel_insn) = 1;

  /* Add CFI info manually.  */
  REG_NOTES (parallel_insn) = dwarf;
}
948
949 /* Function that may creates more instructions
950 for large value on adjusting stack pointer.
951
952 In nds32 target, 'addi' can be used for stack pointer
953 adjustment in prologue/epilogue stage.
954 However, sometimes there are too many local variables so that
955 the adjustment value is not able to be fit in the 'addi' instruction.
956 One solution is to move value into a register
957 and then use 'add' instruction.
958 In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
959 Also, we need to return zero for sp adjustment so that
960 proglogue/epilogue knows there is no need to create 'addi' instruction. */
961 static int
962 nds32_force_addi_stack_int (int full_value)
963 {
964 int adjust_value;
965
966 rtx tmp_reg;
967 rtx sp_adjust_insn;
968
969 if (!satisfies_constraint_Is15 (GEN_INT (full_value)))
970 {
971 /* The value is not able to fit in single addi instruction.
972 Create more instructions of moving value into a register
973 and then add stack pointer with it. */
974
975 /* $r15 is going to be temporary register to hold the value. */
976 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
977
978 /* Create one more instruction to move value
979 into the temporary register. */
980 emit_move_insn (tmp_reg, GEN_INT (full_value));
981
982 /* Create new 'add' rtx. */
983 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
984 stack_pointer_rtx,
985 tmp_reg);
986 /* Emit rtx into insn list and receive its transformed insn rtx. */
987 sp_adjust_insn = emit_insn (sp_adjust_insn);
988
989 /* At prologue, we need to tell GCC that this is frame related insn,
990 so that we can consider this instruction to output debug information.
991 If full_value is NEGATIVE, it means this function
992 is invoked by expand_prologue. */
993 if (full_value < 0)
994 {
995 /* Because (tmp_reg <- full_value) may be split into two
996 rtl patterns, we can not set its RTX_FRAME_RELATED_P.
997 We need to construct another (sp <- sp + full_value)
998 and then insert it into sp_adjust_insn's reg note to
999 represent a frame related expression.
1000 GCC knows how to refer it and output debug information. */
1001
1002 rtx plus_rtx;
1003 rtx set_rtx;
1004
1005 plus_rtx = plus_constant (Pmode, stack_pointer_rtx, full_value);
1006 set_rtx = gen_rtx_SET (stack_pointer_rtx, plus_rtx);
1007 add_reg_note (sp_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);
1008
1009 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
1010 }
1011
1012 /* We have used alternative way to adjust stack pointer value.
1013 Return zero so that prologue/epilogue
1014 will not generate other instructions. */
1015 return 0;
1016 }
1017 else
1018 {
1019 /* The value is able to fit in addi instruction.
1020 However, remember to make it to be positive value
1021 because we want to return 'adjustment' result. */
1022 adjust_value = (full_value < 0) ? (-full_value) : (full_value);
1023
1024 return adjust_value;
1025 }
1026 }
1027
1028 /* Return true if MODE/TYPE need double word alignment. */
1029 static bool
1030 nds32_needs_double_word_align (machine_mode mode, const_tree type)
1031 {
1032 unsigned int align;
1033
1034 /* Pick up the alignment according to the mode or type. */
1035 align = NDS32_MODE_TYPE_ALIGN (mode, type);
1036
1037 return (align > PARM_BOUNDARY);
1038 }
1039
1040 /* Return true if FUNC is a naked function. */
1041 static bool
1042 nds32_naked_function_p (tree func)
1043 {
1044 tree t;
1045
1046 if (TREE_CODE (func) != FUNCTION_DECL)
1047 abort ();
1048
1049 t = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
1050
1051 return (t != NULL_TREE);
1052 }
1053
1054 /* Function that check if 'X' is a valid address register.
1055 The variable 'STRICT' is very important to
1056 make decision for register number.
1057
1058 STRICT : true
1059 => We are in reload pass or after reload pass.
1060 The register number should be strictly limited in general registers.
1061
1062 STRICT : false
1063 => Before reload pass, we are free to use any register number. */
1064 static bool
1065 nds32_address_register_rtx_p (rtx x, bool strict)
1066 {
1067 int regno;
1068
1069 if (GET_CODE (x) != REG)
1070 return false;
1071
1072 regno = REGNO (x);
1073
1074 if (strict)
1075 return REGNO_OK_FOR_BASE_P (regno);
1076 else
1077 return true;
1078 }
1079
1080 /* Function that check if 'INDEX' is valid to be a index rtx for address.
1081
1082 OUTER_MODE : Machine mode of outer address rtx.
1083 INDEX : Check if this rtx is valid to be a index for address.
1084 STRICT : If it is true, we are in reload pass or after reload pass. */
1085 static bool
1086 nds32_legitimate_index_p (machine_mode outer_mode,
1087 rtx index,
1088 bool strict)
1089 {
1090 int regno;
1091 rtx op0;
1092 rtx op1;
1093
1094 switch (GET_CODE (index))
1095 {
1096 case REG:
1097 regno = REGNO (index);
1098 /* If we are in reload pass or after reload pass,
1099 we need to limit it to general register. */
1100 if (strict)
1101 return REGNO_OK_FOR_INDEX_P (regno);
1102 else
1103 return true;
1104
1105 case CONST_INT:
1106 /* The alignment of the integer value is determined by 'outer_mode'. */
1107 if (GET_MODE_SIZE (outer_mode) == 1)
1108 {
1109 /* Further check if the value is legal for the 'outer_mode'. */
1110 if (!satisfies_constraint_Is15 (index))
1111 return false;
1112
1113 /* Pass all test, the value is valid, return true. */
1114 return true;
1115 }
1116 if (GET_MODE_SIZE (outer_mode) == 2
1117 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
1118 {
1119 /* Further check if the value is legal for the 'outer_mode'. */
1120 if (!satisfies_constraint_Is16 (index))
1121 return false;
1122
1123 /* Pass all test, the value is valid, return true. */
1124 return true;
1125 }
1126 if (GET_MODE_SIZE (outer_mode) == 4
1127 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1128 {
1129 /* Further check if the value is legal for the 'outer_mode'. */
1130 if (!satisfies_constraint_Is17 (index))
1131 return false;
1132
1133 /* Pass all test, the value is valid, return true. */
1134 return true;
1135 }
1136 if (GET_MODE_SIZE (outer_mode) == 8
1137 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1138 {
1139 /* Further check if the value is legal for the 'outer_mode'. */
1140 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
1141 SImode)))
1142 return false;
1143
1144 /* Pass all test, the value is valid, return true. */
1145 return true;
1146 }
1147
1148 return false;
1149
1150 case MULT:
1151 op0 = XEXP (index, 0);
1152 op1 = XEXP (index, 1);
1153
1154 if (REG_P (op0) && CONST_INT_P (op1))
1155 {
1156 int multiplier;
1157 multiplier = INTVAL (op1);
1158
1159 /* We only allow (mult reg const_int_1)
1160 or (mult reg const_int_2) or (mult reg const_int_4). */
1161 if (multiplier != 1 && multiplier != 2 && multiplier != 4)
1162 return false;
1163
1164 regno = REGNO (op0);
1165 /* Limit it in general registers if we are
1166 in reload pass or after reload pass. */
1167 if(strict)
1168 return REGNO_OK_FOR_INDEX_P (regno);
1169 else
1170 return true;
1171 }
1172
1173 return false;
1174
1175 case ASHIFT:
1176 op0 = XEXP (index, 0);
1177 op1 = XEXP (index, 1);
1178
1179 if (REG_P (op0) && CONST_INT_P (op1))
1180 {
1181 int sv;
1182 /* op1 is already the sv value for use to do left shift. */
1183 sv = INTVAL (op1);
1184
1185 /* We only allow (ashift reg const_int_0)
1186 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1187 if (sv != 0 && sv != 1 && sv !=2)
1188 return false;
1189
1190 regno = REGNO (op0);
1191 /* Limit it in general registers if we are
1192 in reload pass or after reload pass. */
1193 if(strict)
1194 return REGNO_OK_FOR_INDEX_P (regno);
1195 else
1196 return true;
1197 }
1198
1199 return false;
1200
1201 default:
1202 return false;
1203 }
1204 }
1205
1206 /* ------------------------------------------------------------------------ */
1207
1208 /* PART 3: Implement target hook stuff definitions. */
1209 \f
1210 /* Register Classes. */
1211
1212 static unsigned char
1213 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1214 machine_mode mode)
1215 {
1216 /* Return the maximum number of consecutive registers
1217 needed to represent "mode" in a register of "rclass". */
1218 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1219 }
1220
1221 static int
1222 nds32_register_priority (int hard_regno)
1223 {
1224 /* Encourage to use r0-r7 for LRA when optimize for size. */
1225 if (optimize_size && hard_regno < 8)
1226 return 4;
1227 return 3;
1228 }
1229
1230 \f
1231 /* Stack Layout and Calling Conventions. */
1232
1233 /* There are three kinds of pointer concepts using in GCC compiler:
1234
1235 frame pointer: A pointer to the first location of local variables.
1236 stack pointer: A pointer to the top of a stack frame.
1237 argument pointer: A pointer to the incoming arguments.
1238
1239 In nds32 target calling convention, we are using 8-byte alignment.
1240 Besides, we would like to have each stack frame of a function includes:
1241
1242 [Block A]
1243 1. previous hard frame pointer
1244 2. return address
1245 3. callee-saved registers
     4. <padding bytes> (we will calculate in nds32_compute_stack_frame()
			 and save it at
			 cfun->machine->callee_saved_area_padding_bytes)
1249
1250 [Block B]
1251 1. local variables
1252 2. spilling location
1253 3. <padding bytes> (it will be calculated by GCC itself)
1254 4. incoming arguments
1255 5. <padding bytes> (it will be calculated by GCC itself)
1256
1257 [Block C]
1258 1. <padding bytes> (it will be calculated by GCC itself)
1259 2. outgoing arguments
1260
1261 We 'wrap' these blocks together with
1262 hard frame pointer ($r28) and stack pointer ($r31).
1263 By applying the basic frame/stack/argument pointers concept,
   the layout of a stack frame should be like this:
1265
1266 | |
1267 old stack pointer -> ----
1268 | | \
1269 | | saved arguments for
1270 | | vararg functions
1271 | | /
1272 hard frame pointer -> --
1273 & argument pointer | | \
1274 | | previous hardware frame pointer
1275 | | return address
1276 | | callee-saved registers
1277 | | /
1278 frame pointer -> --
1279 | | \
1280 | | local variables
1281 | | and incoming arguments
1282 | | /
1283 --
1284 | | \
1285 | | outgoing
1286 | | arguments
1287 | | /
1288 stack pointer -> ----
1289
1290 $SFP and $AP are used to represent frame pointer and arguments pointer,
1291 which will be both eliminated as hard frame pointer. */
1292
1293 /* -- Eliminating Frame Pointer and Arg Pointer. */
1294
1295 static bool
1296 nds32_can_eliminate (const int from_reg, const int to_reg)
1297 {
1298 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1299 return true;
1300
1301 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1302 return true;
1303
1304 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1305 return true;
1306
1307 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1308 return true;
1309
1310 return false;
1311 }
1312
1313 /* -- Passing Arguments in Registers. */
1314
1315 static rtx
1316 nds32_function_arg (cumulative_args_t ca, machine_mode mode,
1317 const_tree type, bool named)
1318 {
1319 unsigned int regno;
1320 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1321
1322 /* The last time this hook is called,
1323 it is called with MODE == VOIDmode. */
1324 if (mode == VOIDmode)
1325 return NULL_RTX;
1326
1327 /* For nameless arguments, we need to take care it individually. */
1328 if (!named)
1329 {
1330 /* If we are under hard float abi, we have arguments passed on the
1331 stack and all situation can be handled by GCC itself. */
1332 if (TARGET_HARD_FLOAT)
1333 return NULL_RTX;
1334
1335 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1336 {
1337 /* If we still have enough registers to pass argument, pick up
1338 next available register number. */
1339 regno
1340 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1341 return gen_rtx_REG (mode, regno);
1342 }
1343
1344 /* No register available, return NULL_RTX.
1345 The compiler will use stack to pass argument instead. */
1346 return NULL_RTX;
1347 }
1348
1349 /* The following is to handle named argument.
1350 Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
1351 are different. */
1352 if (TARGET_HARD_FLOAT)
1353 {
1354 /* Currently we have not implemented hard float yet. */
1355 gcc_unreachable ();
1356 }
1357 else
1358 {
1359 /* For !TARGET_HARD_FLOAT calling convention, we always use GPR to pass
1360 argument. Since we allow to pass argument partially in registers,
1361 we can just return it if there are still registers available. */
1362 if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
1363 {
1364 /* Pick up the next available register number. */
1365 regno
1366 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
1367 return gen_rtx_REG (mode, regno);
1368 }
1369
1370 }
1371
1372 /* No register available, return NULL_RTX.
1373 The compiler will use stack to pass argument instead. */
1374 return NULL_RTX;
1375 }
1376
1377 static bool
1378 nds32_must_pass_in_stack (machine_mode mode, const_tree type)
1379 {
1380 /* Return true if a type must be passed in memory.
1381 If it is NOT using hard float abi, small aggregates can be
1382 passed in a register even we are calling a variadic function.
1383 So there is no need to take padding into consideration. */
1384 if (TARGET_HARD_FLOAT)
1385 return must_pass_in_stack_var_size_or_pad (mode, type);
1386 else
1387 return must_pass_in_stack_var_size (mode, type);
1388 }
1389
/* Implement TARGET_ARG_PARTIAL_BYTES.  */
static int
nds32_arg_partial_bytes (cumulative_args_t ca, machine_mode mode,
			 tree type, bool named ATTRIBUTE_UNUSED)
{
  /* Returns the number of bytes at the beginning of an argument that
     must be put in registers.  The value must be zero for arguments that are
     passed entirely in registers or that are entirely pushed on the stack.
     Besides, TARGET_FUNCTION_ARG for these arguments should return the
     first register to be used by the caller for this argument.  */
  unsigned int needed_reg_count;
  unsigned int remaining_reg_count;
  CUMULATIVE_ARGS *cum;

  cum = get_cumulative_args (ca);

  /* Under hard float abi, we better have argument entirely passed in
     registers or pushed on the stack so that we can reduce the complexity
     of dealing with cum->gpr_offset and cum->fpr_offset.  */
  if (TARGET_HARD_FLOAT)
    return 0;

  /* If we have already run out of argument registers, return zero
     so that the argument will be entirely pushed on the stack.  */
  if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
      >= NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS)
    return 0;

  /* Calculate how many registers do we need for this argument.  */
  needed_reg_count = NDS32_NEED_N_REGS_FOR_ARG (mode, type);

  /* Calculate how many argument registers have left for passing argument.
     Note that we should count it from next available register number.  */
  remaining_reg_count
    = NDS32_MAX_GPR_REGS_FOR_ARGS
      - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
	 - NDS32_GPR_ARG_FIRST_REGNUM);

  /* Note that we have to return the number of bytes, not registers count.  */
  if (needed_reg_count > remaining_reg_count)
    return remaining_reg_count * UNITS_PER_WORD;

  return 0;
}
1433
/* Implement TARGET_FUNCTION_ARG_ADVANCE.

   Advance the cumulative-args state pointed to by CA past an argument
   of MODE and TYPE.  NAMED is false for the anonymous tail arguments
   of a variadic call.  */
static void
nds32_function_arg_advance (cumulative_args_t ca, machine_mode mode,
			    const_tree type, bool named)
{
  machine_mode sub_mode;
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  if (named)
    {
      /* We need to further check TYPE and MODE so that we can determine
	 which kind of register we shall advance.  For a complex type
	 the element mode decides float vs integer classification.  */
      if (type && TREE_CODE (type) == COMPLEX_TYPE)
	sub_mode = TYPE_MODE (TREE_TYPE (type));
      else
	sub_mode = mode;

      /* Under hard float abi, we may advance FPR registers.  */
      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (sub_mode) == MODE_FLOAT)
	{
	  /* Currently we have not implemented hard float yet.  */
	  gcc_unreachable ();
	}
      else
	{
	  /* Advance past every register this argument occupies, starting
	     from the first register it could have been assigned to.  */
	  cum->gpr_offset
	    = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
	      - NDS32_GPR_ARG_FIRST_REGNUM
	      + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
	}
    }
  else
    {
      /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
	 we can advance next register as well so that caller is
	 able to pass arguments in registers and callee must be
	 in charge of pushing all of them into stack.  */
      if (!TARGET_HARD_FLOAT)
	{
	  cum->gpr_offset
	    = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
	      - NDS32_GPR_ARG_FIRST_REGNUM
	      + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
	}
    }
}
1479
1480 static unsigned int
1481 nds32_function_arg_boundary (machine_mode mode, const_tree type)
1482 {
1483 return (nds32_needs_double_word_align (mode, type)
1484 ? NDS32_DOUBLE_WORD_ALIGNMENT
1485 : PARM_BOUNDARY);
1486 }
1487
1488 /* -- How Scalar Function Values Are Returned. */
1489
1490 static rtx
1491 nds32_function_value (const_tree ret_type,
1492 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1493 bool outgoing ATTRIBUTE_UNUSED)
1494 {
1495 machine_mode mode;
1496 int unsignedp;
1497
1498 mode = TYPE_MODE (ret_type);
1499 unsignedp = TYPE_UNSIGNED (ret_type);
1500
1501 mode = promote_mode (ret_type, mode, &unsignedp);
1502
1503 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1504 }
1505
1506 static rtx
1507 nds32_libcall_value (machine_mode mode,
1508 const_rtx fun ATTRIBUTE_UNUSED)
1509 {
1510 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1511 }
1512
1513 static bool
1514 nds32_function_value_regno_p (const unsigned int regno)
1515 {
1516 return (regno == NDS32_GPR_RET_FIRST_REGNUM);
1517 }
1518
1519 /* -- Function Entry and Exit. */
1520
/* The content produced from this function
   will be placed before prologue body.
   It emits, as assembler comments, the frame figures computed earlier,
   the set of ever-live registers, and the function's attribute list;
   it also lets the ISR machinery record vector information derived
   from those attributes.  */
static void
nds32_asm_function_prologue (FILE *file,
			     HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  int r;
  const char *func_name;
  tree attrs;
  tree name;

  /* All stack frame information is supposed to be
     already computed when expanding prologue.
     The result is in cfun->machine.
     DO NOT call nds32_compute_stack_frame() here
     because it may corrupt the essential information.  */

  fprintf (file, "\t! BEGIN PROLOGUE\n");
  fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
  fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
  fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
  fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);

  /* Use df_regs_ever_live_p() to detect if the register
     is ever used in the current function.  */
  fprintf (file, "\t! registers ever_live: ");
  for (r = 0; r < 32; r++)
    {
      if (df_regs_ever_live_p (r))
	fprintf (file, "%s, ", reg_names[r]);
    }
  fputc ('\n', file);

  /* Display the attributes of this function.  */
  fprintf (file, "\t! function attributes: ");
  /* Get the attributes tree list.
     Note that GCC builds attributes list with reverse order.  */
  attrs = DECL_ATTRIBUTES (current_function_decl);

  /* If there are no attributes at all, print out "None".  */
  if (!attrs)
    fprintf (file, "None");

  /* If there are some attributes, try if we need to
     construct isr vector information.  */
  func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
  nds32_construct_isr_vectors_information (attrs, func_name);

  /* Display all attributes of this function.  */
  while (attrs)
    {
      name = TREE_PURPOSE (attrs);
      fprintf (file, "%s ", IDENTIFIER_POINTER (name));

      /* Pick up the next attribute.  */
      attrs = TREE_CHAIN (attrs);
    }
  fputc ('\n', file);
}
1580
/* After rtl prologue has been expanded, this function is used.
   Besides the END PROLOGUE marker, this is where the ".omit_fp_begin"
   fp-as-gp region is opened; the matching ".omit_fp_end" is emitted in
   nds32_asm_function_begin_epilogue under the same condition.  */
static void
nds32_asm_function_end_prologue (FILE *file)
{
  fprintf (file, "\t! END PROLOGUE\n");

  /* If frame pointer is NOT needed and -mfp-as-gp is issued,
     we can generate special directive: ".omit_fp_begin"
     to guide linker doing fp-as-gp optimization.
     However, for a naked function, which means
     it should not have prologue/epilogue,
     using fp-as-gp still requires saving $fp by push/pop behavior and
     there is no benefit to use fp-as-gp on such small function.
     So we need to make sure this function is NOT naked as well.  */
  if (!frame_pointer_needed
      && !cfun->machine->naked_p
      && cfun->machine->fp_as_gp_p)
    {
      fprintf (file, "\t! ----------------------------------------\n");
      fprintf (file, "\t! Guide linker to do "
		     "link time optimization: fp-as-gp\n");
      fprintf (file, "\t! We add one more instruction to "
		     "initialize $fp near to $gp location.\n");
      fprintf (file, "\t! If linker fails to use fp-as-gp transformation,\n");
      fprintf (file, "\t! this extra instruction should be "
		     "eliminated at link stage.\n");
      fprintf (file, "\t.omit_fp_begin\n");
      fprintf (file, "\tla\t$fp,_FP_BASE_\n");
      fprintf (file, "\t! ----------------------------------------\n");
    }
}
1612
/* Before rtl epilogue has been expanded, this function is used.
   It closes the ".omit_fp_begin" region opened in
   nds32_asm_function_end_prologue, so the enabling condition below
   must stay identical to the one used there.  */
static void
nds32_asm_function_begin_epilogue (FILE *file)
{
  /* If frame pointer is NOT needed and -mfp-as-gp is issued,
     we can generate special directive: ".omit_fp_end"
     to claim fp-as-gp optimization range.
     However, for a naked function,
     which means it should not have prologue/epilogue,
     using fp-as-gp still requires saving $fp by push/pop behavior and
     there is no benefit to use fp-as-gp on such small function.
     So we need to make sure this function is NOT naked as well.  */
  if (!frame_pointer_needed
      && !cfun->machine->naked_p
      && cfun->machine->fp_as_gp_p)
    {
      fprintf (file, "\t! ----------------------------------------\n");
      fprintf (file, "\t! Claim the range of fp-as-gp "
		     "link time optimization\n");
      fprintf (file, "\t.omit_fp_end\n");
      fprintf (file, "\t! ----------------------------------------\n");
    }

  fprintf (file, "\t! BEGIN EPILOGUE\n");
}
1638
1639 /* The content produced from this function
1640 will be placed after epilogue body. */
1641 static void
1642 nds32_asm_function_epilogue (FILE *file,
1643 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
1644 {
1645 fprintf (file, "\t! END EPILOGUE\n");
1646 }
1647
1648 static void
1649 nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
1650 HOST_WIDE_INT delta,
1651 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
1652 tree function)
1653 {
1654 int this_regno;
1655
1656 /* Make sure unwind info is emitted for the thunk if needed. */
1657 final_start_function (emit_barrier (), file, 1);
1658
1659 this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
1660 ? 1
1661 : 0);
1662
1663 if (delta != 0)
1664 {
1665 if (satisfies_constraint_Is15 (GEN_INT (delta)))
1666 {
1667 fprintf (file, "\taddi\t$r%d, $r%d, %ld\n",
1668 this_regno, this_regno, delta);
1669 }
1670 else if (satisfies_constraint_Is20 (GEN_INT (delta)))
1671 {
1672 fprintf (file, "\tmovi\t$ta, %ld\n", delta);
1673 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1674 }
1675 else
1676 {
1677 fprintf (file, "\tsethi\t$ta, hi20(%ld)\n", delta);
1678 fprintf (file, "\tori\t$ta, $ta, lo12(%ld)\n", delta);
1679 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1680 }
1681 }
1682
1683 fprintf (file, "\tb\t");
1684 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
1685 fprintf (file, "\n");
1686
1687 final_end_function ();
1688 }
1689
1690 /* -- Permitting tail calls. */
1691
1692 /* Determine whether we need to enable warning for function return check. */
1693 static bool
1694 nds32_warn_func_return (tree decl)
1695 {
1696 /* Naked functions are implemented entirely in assembly, including the
1697 return sequence, so suppress warnings about this. */
1698 return !nds32_naked_function_p (decl);
1699 }
1700
1701 \f
1702 /* Implementing the Varargs Macros. */
1703
1704 static void
1705 nds32_setup_incoming_varargs (cumulative_args_t ca,
1706 machine_mode mode,
1707 tree type,
1708 int *pretend_args_size,
1709 int second_time ATTRIBUTE_UNUSED)
1710 {
1711 unsigned int total_args_regs;
1712 unsigned int num_of_used_regs;
1713 unsigned int remaining_reg_count;
1714 CUMULATIVE_ARGS *cum;
1715
1716 /* If we are under hard float abi, we do not need to set *pretend_args_size.
1717 So that all nameless arguments are pushed by caller and all situation
1718 can be handled by GCC itself. */
1719 if (TARGET_HARD_FLOAT)
1720 return;
1721
1722 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
1723 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
1724 However, for nameless(anonymous) arguments, we should push them on the
1725 stack so that all the nameless arguments appear to have been passed
1726 consecutively in the memory for accessing. Hence, we need to check and
1727 exclude the registers that are used for named arguments. */
1728
1729 cum = get_cumulative_args (ca);
1730
1731 /* The MODE and TYPE describe the last argument.
1732 We need those information to determine the remaining registers
1733 for varargs. */
1734 total_args_regs
1735 = NDS32_MAX_GPR_REGS_FOR_ARGS + NDS32_GPR_ARG_FIRST_REGNUM;
1736 num_of_used_regs
1737 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
1738 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1739
1740 remaining_reg_count = total_args_regs - num_of_used_regs;
1741 *pretend_args_size = remaining_reg_count * UNITS_PER_WORD;
1742
1743 return;
1744 }
1745
1746 static bool
1747 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1748 {
1749 /* If this hook returns true, the named argument of FUNCTION_ARG is always
1750 true for named arguments, and false for unnamed arguments. */
1751 return true;
1752 }
1753
1754 \f
1755 /* Trampolines for Nested Functions. */
1756
/* Output the assembler template for a trampoline.
   Layout (offsets from the trampoline start, per the code below):
     +0 .. +15 : code that loads the two data slots and jumps
     +16       : slot later filled with chain_value
     +20       : slot later filled with the nested function address
   Both slots are populated by nds32_trampoline_init.  */
static void
nds32_asm_trampoline_template (FILE *f)
{
  if (TARGET_REDUCED_REGS)
    {
      /* Trampoline is not supported on reduced-set registers yet.  */
      sorry ("a nested function is not supported for reduced registers");
    }
  else
    {
      asm_fprintf (f, "\t! Trampoline code template\n");
      asm_fprintf (f, "\t! This code fragment will be copied "
		      "into stack on demand\n");

      asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
      asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
		      "! load nested function address\n");
      asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
		      "! load chain_value\n");
      asm_fprintf (f, "\tjr\t$r15\n");
    }

  /* Preserve space ($pc + 16) for saving chain_value,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving chain_value\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);

  /* Preserve space ($pc + 20) for saving nested function address,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving nested function address\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
}
1789
/* Emit RTL insns to initialize the variable parts of a trampoline:
   copy the code template emitted by nds32_asm_trampoline_template()
   into the stack slot M_TRAMP, store CHAIN_VALUE and FNDECL's address
   into the two data slots (offsets 16 and 20), then synchronize the
   instruction cache so the copied code can actually be executed.  */
static void
nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  int i;

  /* Nested function address.  */
  rtx fnaddr;
  /* The memory rtx that is going to
     be filled with chain_value.  */
  rtx chain_value_mem;
  /* The memory rtx that is going to
     be filled with nested function address.  */
  rtx nested_func_mem;

  /* Start address of trampoline code in stack, for doing cache sync.  */
  rtx sync_cache_addr;
  /* Temporary register for sync instruction.  */
  rtx tmp_reg;
  /* Instruction-cache sync instruction,
     requesting an argument as starting address.  */
  rtx isync_insn;
  /* For convenience reason of doing comparison.  */
  int tramp_align_in_bytes;

  /* Trampoline is not supported on reduced-set registers yet.  */
  if (TARGET_REDUCED_REGS)
    sorry ("a nested function is not supported for reduced registers");

  /* STEP 1: Copy trampoline code template into stack,
	     fill up essential data into stack.  */

  /* Extract nested function address rtx.  */
  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* m_tramp is memory rtx that is going to be filled with trampoline code.
     We have nds32_asm_trampoline_template() to emit template pattern.  */
  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* After copying trampoline code into stack,
     fill chain_value into stack (template offset 16).  */
  chain_value_mem = adjust_address (m_tramp, SImode, 16);
  emit_move_insn (chain_value_mem, chain_value);
  /* After copying trampoline code into stack,
     fill nested function address into stack (template offset 20).  */
  nested_func_mem = adjust_address (m_tramp, SImode, 20);
  emit_move_insn (nested_func_mem, fnaddr);

  /* STEP 2: Sync instruction-cache.  */

  /* We have successfully filled trampoline code into stack.
     However, in order to execute code in stack correctly,
     we must sync instruction cache.  */
  sync_cache_addr = XEXP (m_tramp, 0);
  tmp_reg         = gen_reg_rtx (SImode);
  isync_insn      = gen_unspec_volatile_isync (tmp_reg);

  /* Because nds32_cache_block_size is in bytes,
     we get trampoline alignment in bytes for convenient comparison.  */
  tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;

  if (tramp_align_in_bytes >= nds32_cache_block_size
      && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
    {
      /* Under this condition, the starting address of trampoline
	 must be aligned to the starting address of each cache block
	 and we do not have to worry about cross-boundary issue.
	 Sync each cache block that the trampoline touches.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}
    }
  else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
    {
      /* The starting address of trampoline code
	 may not be aligned to the cache block,
	 so the trampoline code may be across two cache block.
	 We need to sync the last element, which is 4-byte size,
	 of trampoline template.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}

      /* The last element of trampoline template is 4-byte size.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }
  else
    {
      /* This is the simplest case.
	 Because TRAMPOLINE_SIZE is less than or
	 equal to nds32_cache_block_size,
	 we can just sync start address and
	 the last element of trampoline code.  */

      /* Sync starting address of trampoline code.  */
      emit_move_insn (tmp_reg, sync_cache_addr);
      emit_insn (isync_insn);
      /* Sync the last element, which is 4-byte size,
	 of trampoline template.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }

  /* Set instruction serialization barrier
     to guarantee the correct operations.  */
  emit_insn (gen_unspec_volatile_isb ());
}
1916
1917 \f
1918 /* Addressing Modes. */
1919
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  Decide whether X is a valid
   memory address for a value of MODE.  STRICT is true when hard register
   numbers must be validated (i.e. during or after register allocation).
   Returns true iff X matches one of the addressing modes the NDS32
   load/store instructions can encode, subject to -mcmodel restrictions.  */
static bool
nds32_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  /* For (mem:DI addr) or (mem:DF addr) case,
     we only allow 'addr' to be [reg], [symbol_ref],
     [const], or [reg + const_int] pattern.  */
  if (mode == DImode || mode == DFmode)
    {
      /* Allow [Reg + const_int] addressing mode.
	 Either operand of the PLUS may be the base register,
	 so both orderings are checked.  */
      if (GET_CODE (x) == PLUS)
	{
	  if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	      && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
	      && CONST_INT_P (XEXP (x, 1)))
	    return true;

	  else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
		   && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
		   && CONST_INT_P (XEXP (x, 0)))
	    return true;
	}

      /* Now check [reg], [symbol_ref], and [const].
	 Any other rtx code is rejected outright for DImode/DFmode;
	 the accepted codes fall through to the full checking below.  */
      if (GET_CODE (x) != REG
	  && GET_CODE (x) != SYMBOL_REF
	  && GET_CODE (x) != CONST)
	return false;
    }

  /* Check if 'x' is a valid address.  */
  switch (GET_CODE (x))
    {
    case REG:
      /* (mem (reg A)) => [Ra] */
      return nds32_address_register_rtx_p (x, strict);

    case SYMBOL_REF:
      /* (mem (symbol_ref A)) => [symbol_ref] */
      /* If -mcmodel=large, the 'symbol_ref' is not a valid address
	 during or after LRA/reload phase.  */
      if (TARGET_CMODEL_LARGE
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;
      /* If -mcmodel=medium and the symbol references to rodata section,
	 the 'symbol_ref' is not a valid address during or after
	 LRA/reload phase.  */
      if (TARGET_CMODEL_MEDIUM
	  && NDS32_SYMBOL_REF_RODATA_P (x)
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;

      return true;

    case CONST:
      /* (mem (const (...)))
	 => [ + const_addr ], where const_addr = symbol_ref + const_int */
      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 0);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
	    {
	      /* Now we see the [ + const_addr ] pattern, but we need
		 some further checking.  */
	      /* If -mcmodel=large, the 'const_addr' is not a valid address
		 during or after LRA/reload phase.  */
	      if (TARGET_CMODEL_LARGE
		  && (reload_completed
		      || reload_in_progress
		      || lra_in_progress))
		return false;
	      /* If -mcmodel=medium and the symbol references to rodata section,
		 the 'const_addr' is not a valid address during or after
		 LRA/reload phase.  */
	      if (TARGET_CMODEL_MEDIUM
		  && NDS32_SYMBOL_REF_RODATA_P (op0)
		  && (reload_completed
		      || reload_in_progress
		      || lra_in_progress))
		return false;

	      /* At this point we can make sure 'const_addr' is a
		 valid address.  */
	      return true;
	    }
	}

	return false;

    case POST_MODIFY:
      /* (mem (post_modify (reg) (plus (reg) (reg))))
	 => [Ra], Rb */
      /* (mem (post_modify (reg) (plus (reg) (const_int))))
	 => [Ra], const_int */
      if (GET_CODE (XEXP (x, 0)) == REG
	  && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 1);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (nds32_address_register_rtx_p (op0, strict)
	      && nds32_legitimate_index_p (mode, op1, strict))
	    return true;
	  else
	    return false;
	}

      return false;

    case POST_INC:
    case POST_DEC:
      /* (mem (post_inc reg)) => [Ra], 1/2/4 */
      /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
      /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
	 We only need to deal with register Ra.  */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
	return true;
      else
	return false;

    case PLUS:
      /* (mem (plus reg const_int))
	 => [Ra + imm] */
      /* (mem (plus reg reg))
	 => [Ra + Rb] */
      /* (mem (plus (mult reg const_int) reg))
	 => [Ra + Rb << sv] */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	  && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
	return true;
      else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
	       && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
	return true;
      else
	return false;

    case LO_SUM:
      /* (mem (lo_sum (reg) (symbol_ref))) */
      /* (mem (lo_sum (reg) (const))) */
      /* Recursively validate the high-part symbol/const operand
	 with the same mode and strictness.  */
      gcc_assert (REG_P (XEXP (x, 0)));
      if (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
	  || GET_CODE (XEXP (x, 1)) == CONST)
	return nds32_legitimate_address_p (mode, XEXP (x, 1), strict);
      else
	return false;

    default:
      return false;
    }
}
2079
2080 \f
2081 /* Describing Relative Costs of Operations. */
2082
2083 static int
2084 nds32_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2085 reg_class_t from,
2086 reg_class_t to)
2087 {
2088 if (from == HIGH_REGS || to == HIGH_REGS)
2089 return 6;
2090
2091 return 2;
2092 }
2093
/* Implement TARGET_MEMORY_MOVE_COST.  Use a flat cost of 8 for any
   memory access, independent of mode, register class, and direction
   (load vs. store).  */
static int
nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			reg_class_t rclass ATTRIBUTE_UNUSED,
			bool in ATTRIBUTE_UNUSED)
{
  return 8;
}
2101
/* This target hook describes the relative costs of RTL expressions.
   Return 'true' when all subexpressions of x have been processed.
   Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
   Refer to gcc/rtlanal.c for more information.  */
static bool
nds32_rtx_costs (rtx x,
		 int code,
		 int outer_code,
		 int opno,
		 int *total,
		 bool speed)
{
  /* Thin wrapper: the actual cost model lives in
     nds32_rtx_costs_impl() (nds32-cost.c).  */
  return nds32_rtx_costs_impl (x, code, outer_code, opno, total, speed);
}
2116
/* Implement TARGET_ADDRESS_COST.  Thin wrapper: the actual addressing
   cost model lives in nds32_address_cost_impl().  */
static int
nds32_address_cost (rtx address,
		    machine_mode mode,
		    addr_space_t as,
		    bool speed)
{
  return nds32_address_cost_impl (address, mode, as, speed);
}
2125
2126 \f
2127 /* Dividing the Output into Sections (Texts, Data, . . . ). */
2128
2129 /* If references to a symbol or a constant must be treated differently
2130 depending on something about the variable or function named by the symbol
2131 (such as what section it is in), we use this hook to store flags
2132 in symbol_ref rtx. */
2133 static void
2134 nds32_encode_section_info (tree decl, rtx rtl, int new_decl_p)
2135 {
2136 default_encode_section_info (decl, rtl, new_decl_p);
2137
2138 /* For the memory rtx, if it references to rodata section, we can store
2139 NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
2140 nds32_legitimate_address_p() can determine how to treat such symbol_ref
2141 based on -mcmodel=X and this information. */
2142 if (MEM_P (rtl) && MEM_READONLY_P (rtl))
2143 {
2144 rtx addr = XEXP (rtl, 0);
2145
2146 if (GET_CODE (addr) == SYMBOL_REF)
2147 {
2148 /* For (mem (symbol_ref X)) case. */
2149 SYMBOL_REF_FLAGS (addr) |= NDS32_SYMBOL_FLAG_RODATA;
2150 }
2151 else if (GET_CODE (addr) == CONST
2152 && GET_CODE (XEXP (addr, 0)) == PLUS)
2153 {
2154 /* For (mem (const (plus (symbol_ref X) (const_int N)))) case. */
2155 rtx plus_op = XEXP (addr, 0);
2156 rtx op0 = XEXP (plus_op, 0);
2157 rtx op1 = XEXP (plus_op, 1);
2158
2159 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
2160 SYMBOL_REF_FLAGS (op0) |= NDS32_SYMBOL_FLAG_RODATA;
2161 }
2162 }
2163 }
2164
2165 \f
2166 /* Defining the Output Assembler Language. */
2167
2168 /* -- The Overall Framework of an Assembler File. */
2169
2170 static void
2171 nds32_asm_file_start (void)
2172 {
2173 default_file_start ();
2174
2175 /* Tell assembler which ABI we are using. */
2176 fprintf (asm_out_file, "\t! ABI version\n");
2177 fprintf (asm_out_file, "\t.abi_2\n");
2178
2179 /* Tell assembler that this asm code is generated by compiler. */
2180 fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
2181 fprintf (asm_out_file, "\t.flag\tverbatim\n");
2182 /* Give assembler the size of each vector for interrupt handler. */
2183 fprintf (asm_out_file, "\t! This vector size directive is required "
2184 "for checking inconsistency on interrupt handler\n");
2185 fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);
2186
2187 fprintf (asm_out_file, "\t! ------------------------------------\n");
2188
2189 if (TARGET_ISA_V2)
2190 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
2191 if (TARGET_ISA_V3)
2192 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
2193 if (TARGET_ISA_V3M)
2194 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");
2195
2196 if (TARGET_CMODEL_SMALL)
2197 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "SMALL");
2198 if (TARGET_CMODEL_MEDIUM)
2199 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "MEDIUM");
2200 if (TARGET_CMODEL_LARGE)
2201 fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "LARGE");
2202
2203 fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
2204 ((TARGET_BIG_ENDIAN) ? "big-endian"
2205 : "little-endian"));
2206
2207 fprintf (asm_out_file, "\t! ------------------------------------\n");
2208
2209 fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
2210 ((TARGET_CMOV) ? "Yes"
2211 : "No"));
2212 fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
2213 ((TARGET_PERF_EXT) ? "Yes"
2214 : "No"));
2215
2216 fprintf (asm_out_file, "\t! ------------------------------------\n");
2217
2218 fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
2219 ((TARGET_V3PUSH) ? "Yes"
2220 : "No"));
2221 fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
2222 ((TARGET_16_BIT) ? "Yes"
2223 : "No"));
2224 fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
2225 ((TARGET_REDUCED_REGS) ? "Yes"
2226 : "No"));
2227
2228 fprintf (asm_out_file, "\t! ------------------------------------\n");
2229
2230 if (optimize_size)
2231 fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
2232 else
2233 fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);
2234
2235 fprintf (asm_out_file, "\t! ------------------------------------\n");
2236
2237 fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
2238 nds32_cache_block_size);
2239
2240 fprintf (asm_out_file, "\t! ------------------------------------\n");
2241
2242 nds32_asm_file_start_for_isr ();
2243 }
2244
/* Implement TARGET_ASM_FILE_END.  Emit the ISR machinery's file-end
   content, then a closing separator comment matching the ones printed
   by nds32_asm_file_start().  */
static void
nds32_asm_file_end (void)
{
  nds32_asm_file_end_for_isr ();

  fprintf (asm_out_file, "\t! ------------------------------------\n");
}
2252
2253 /* -- Output and Generation of Labels. */
2254
/* Implement TARGET_ASM_GLOBALIZE_LABEL.  Emit a ".global NAME"
   directive so that NAME becomes visible to the linker.  */
static void
nds32_asm_globalize_label (FILE *stream, const char *name)
{
  fprintf (stream, "\t.global\t");
  assemble_name (stream, name);
  fputc ('\n', stream);
}
2262
2263 /* -- Output of Assembler Instructions. */
2264
/* Implement TARGET_PRINT_OPERAND.  Output operand X to STREAM.
   CODE is the optional flag character from the assembler template:
     'V' -- X must be a CONST_INT holding either a raw system/user
	    register index (0 ~ 1023), printed as a number, or an
	    'enum nds32_intrinsic_registers' value (offset by 1024),
	    printed as the corresponding register name.
     0   -- no flag; print X according to its rtx code.  */
static void
nds32_print_operand (FILE *stream, rtx x, int code)
{
  int op_value;

  switch (code)
    {
    case 0 :
      /* Do nothing special.  */
      break;

    case 'V':
      /* 'x' is supposed to be CONST_INT, get the value.  */
      gcc_assert (CONST_INT_P (x));
      op_value = INTVAL (x);

      /* According to the Andes architecture,
	 the system/user register index range is 0 ~ 1023.
	 In order to avoid conflict between user-specified-integer value
	 and enum-specified-register value,
	 the 'enum nds32_intrinsic_registers' value
	 in nds32_intrinsic.h starts from 1024.  */
      if (op_value < 1024 && op_value >= 0)
	{
	  /* If user gives integer value directly (0~1023),
	     we just print out the value.  */
	  fprintf (stream, "%d", op_value);
	}
      else if (op_value < 0
	       || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
			       + 1024))
	{
	  /* The enum index value for array size is out of range.  */
	  error ("intrinsic register index is out of range");
	}
      else
	{
	  /* If user applies normal way with __NDS32_REG_XXX__ enum data,
	     we can print out register name.  Remember to subtract 1024.  */
	  fprintf (stream, "%s",
		   nds32_intrinsic_register_names[op_value - 1024]);
	}

      /* No need to handle following process, so return immediately.  */
      return;

    default :
      /* Unknown flag.  */
      output_operand_lossage ("invalid operand output code");
      break;
    }

  /* No (or unknown) flag: dispatch on the operand's rtx code.  */
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case SYMBOL_REF:
      output_addr_const (stream, x);
      break;

    case REG:
      /* Forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REGNO (x) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      /* Normal cases, print out register name.  */
      fputs (reg_names[REGNO (x)], stream);
      break;

    case MEM:
      /* Delegate to nds32_print_operand_address() via the
	 output_address() machinery.  */
      output_address (XEXP (x, 0));
      break;

    case CODE_LABEL:
    case CONST_INT:
    case CONST:
      output_addr_const (stream, x);
      break;

    default:
      /* Generally, output_addr_const () is able to handle most cases.
	 We want to see what CODE could appear,
	 so we use gcc_unreachable() to stop it.  */
      debug_rtx (x);
      gcc_unreachable ();
      break;
    }
}
2354
/* Implement TARGET_PRINT_OPERAND_ADDRESS.  Output memory address X to
   STREAM in Andes assembler syntax.  Handles the address forms accepted
   by nds32_legitimate_address_p(): symbol/const, [Ra], [Ra + imm],
   [Ra + Rb], [Ra + Rb << sv], post-modify, and post-inc/dec.  On the
   reduced register set, any use of the static chain register ($r16)
   is rejected with sorry().  */
static void
nds32_print_operand_address (FILE *stream, rtx x)
{
  rtx op0, op1;

  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case CONST:
      /* [ + symbol_ref] */
      /* [ + const_addr], where const_addr = symbol_ref + const_int */
      fputs ("[ + ", stream);
      output_addr_const (stream, x);
      fputs ("]", stream);
      break;

    case REG:
      /* Forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REGNO (x) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      /* [Ra] */
      fprintf (stream, "[%s]", reg_names[REGNO (x)]);
      break;

    case PLUS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op1)
	  && REGNO (op1) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && CONST_INT_P (op1))
	{
	  /* [Ra + imm] */
	  fprintf (stream, "[%s + (%d)]",
		   reg_names[REGNO (op0)], (int)INTVAL (op1));
	}
      else if (REG_P (op0) && REG_P (op1))
	{
	  /* [Ra + Rb] */
	  fprintf (stream, "[%s + %s]",
		   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
	}
      else if (GET_CODE (op0) == MULT && REG_P (op1))
	{
	  /* [Ra + Rb << sv]
	     From observation, the pattern looks like:
	     (plus:SI (mult:SI (reg:SI 58)
			       (const_int 4 [0x4]))
		      (reg/f:SI 57)) */
	  int sv;

	  /* We need to set sv to output shift value.
	     The multiplier 1/2/4 maps to shift amount 0/1/2.  */
	  if (INTVAL (XEXP (op0, 1)) == 1)
	    sv = 0;
	  else if (INTVAL (XEXP (op0, 1)) == 2)
	    sv = 1;
	  else if (INTVAL (XEXP (op0, 1)) == 4)
	    sv = 2;
	  else
	    gcc_unreachable ();

	  fprintf (stream, "[%s + %s << %d]",
		   reg_names[REGNO (op1)],
		   reg_names[REGNO (XEXP (op0, 0))],
		   sv);
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    case POST_MODIFY:
      /* (post_modify (regA) (plus (regA) (regB)))
	 (post_modify (regA) (plus (regA) (const_int)))
	 We would like to extract
	 regA and regB (or const_int) from plus rtx.  */
      op0 = XEXP (XEXP (x, 1), 0);
      op1 = XEXP (XEXP (x, 1), 1);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op1)
	  && REGNO (op1) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && REG_P (op1))
	{
	  /* [Ra], Rb */
	  fprintf (stream, "[%s], %s",
		   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
	}
      else if (REG_P (op0) && CONST_INT_P (op1))
	{
	  /* [Ra], imm */
	  fprintf (stream, "[%s], %d",
		   reg_names[REGNO (op0)], (int)INTVAL (op1));
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    case POST_INC:
    case POST_DEC:
      op0 = XEXP (x, 0);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0))
	{
	  /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
	     The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
	     We only need to deal with register Ra.  */
	  fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    default :
      /* Generally, output_addr_const () is able to handle most cases.
	 We want to see what CODE could appear,
	 so we use gcc_unreachable() to stop it.  */
      debug_rtx (x);
      gcc_unreachable ();
      break;
    }
}
2522
2523 \f
2524 /* Defining target-specific uses of __attribute__. */
2525
2526 /* Add some checking after merging attributes. */
2527 static tree
2528 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
2529 {
2530 tree combined_attrs;
2531
2532 /* Create combined attributes. */
2533 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
2534 DECL_ATTRIBUTES (newdecl));
2535
2536 /* Since newdecl is acutally a duplicate of olddecl,
2537 we can take olddecl for some operations. */
2538 if (TREE_CODE (olddecl) == FUNCTION_DECL)
2539 {
2540 /* Check isr-specific attributes conflict. */
2541 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
2542 }
2543
2544 return combined_attrs;
2545 }
2546
/* Implement TARGET_INSERT_ATTRIBUTES.  Validate isr-specific attributes
   on function declarations; emit error() for invalid ids or arguments
   but never modify *ATTRIBUTES.  */
static void
nds32_insert_attributes (tree decl, tree *attributes)
{
  /* For function declaration, we need to check isr-specific attributes:
       1. Call nds32_check_isr_attrs_conflict() to check any conflict.
       2. Check valid integer value for interrupt/exception.
       3. Check valid integer value for reset.
       4. Check valid function for nmi/warm.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree func_attrs;
      tree intr, excp, reset;

      /* Pick up function attributes.  */
      func_attrs = *attributes;

      /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict.  */
      nds32_check_isr_attrs_conflict (decl, func_attrs);

      /* Now we are starting to check valid id value
	 for interrupt/exception/reset.
	 Note that we ONLY check its validity here.
	 To construct isr vector information, it is still performed
	 by nds32_construct_isr_vectors_information().  */
      intr = lookup_attribute ("interrupt", func_attrs);
      excp = lookup_attribute ("exception", func_attrs);
      reset = lookup_attribute ("reset", func_attrs);

      if (intr || excp)
	{
	  /* Deal with interrupt/exception.  */
	  tree id_list;
	  unsigned int lower_bound, upper_bound;

	  /* The way to handle interrupt or exception is the same,
	     we just need to take care of actual vector number.
	     For interrupt(0..63), the actual vector number is (9..72).
	     For exception(1..8), the actual vector number is (1..8).  */
	  lower_bound = (intr) ? (0) : (1);
	  upper_bound = (intr) ? (63) : (8);

	  /* Prepare id list so that we can traverse id value.  */
	  id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));

	  /* 2. Check valid integer value for interrupt/exception.
	     Every id in the attribute's argument list must be an
	     INTEGER_CST inside [lower_bound, upper_bound].  */
	  while (id_list)
	    {
	      tree id;

	      /* Pick up each vector id value.  */
	      id = TREE_VALUE (id_list);
	      /* Issue error if it is not a valid integer value.  */
	      if (TREE_CODE (id) != INTEGER_CST
		  || wi::ltu_p (id, lower_bound)
		  || wi::gtu_p (id, upper_bound))
		error ("invalid id value for interrupt/exception attribute");

	      /* Advance to next id.  */
	      id_list = TREE_CHAIN (id_list);
	    }
	}
      else if (reset)
	{
	  /* Deal with reset.  */
	  tree id_list;
	  tree id;
	  tree nmi, warm;
	  unsigned int lower_bound;
	  unsigned int upper_bound;

	  /* Prepare id_list and identify id value so that
	     we can check if total number of vectors is valid.  */
	  id_list = TREE_VALUE (reset);
	  id = TREE_VALUE (id_list);

	  /* The maximum numbers for user's interrupt is 64.  */
	  lower_bound = 0;
	  upper_bound = 64;

	  /* 3. Check valid integer value for reset.  */
	  if (TREE_CODE (id) != INTEGER_CST
	      || wi::ltu_p (id, lower_bound)
	      || wi::gtu_p (id, upper_bound))
	    error ("invalid id value for reset attribute");

	  /* 4. Check valid function for nmi/warm.
	     Each, when present, must name its handler with an
	     IDENTIFIER_NODE argument.  */
	  nmi = lookup_attribute ("nmi", func_attrs);
	  warm = lookup_attribute ("warm", func_attrs);

	  if (nmi != NULL_TREE)
	    {
	      tree nmi_func_list;
	      tree nmi_func;

	      nmi_func_list = TREE_VALUE (nmi);
	      nmi_func = TREE_VALUE (nmi_func_list);

	      /* Issue error if it is not a valid nmi function.  */
	      if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
		error ("invalid nmi function for reset attribute");
	    }

	  if (warm != NULL_TREE)
	    {
	      tree warm_func_list;
	      tree warm_func;

	      warm_func_list = TREE_VALUE (warm);
	      warm_func = TREE_VALUE (warm_func_list);

	      /* Issue error if it is not a valid warm function.  */
	      if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
		error ("invalid warm function for reset attribute");
	    }
	}
      else
	{
	  /* No interrupt, exception, or reset attribute is set.  */
	  return;
	}
    }
}
2670
/* Implement TARGET_OPTION_PRAGMA_PARSE.  Returning false indicates the
   target handles no '#pragma GCC target' options itself, leaving all
   parsing to the generic machinery.  */
static bool
nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
			   tree pop_target ATTRIBUTE_UNUSED)
{
  /* Currently, we do not parse any pragma target by ourself,
     so just simply return false.  */
  return false;
}
2679
2680 static void
2681 nds32_option_override (void)
2682 {
2683 /* After all the command options have been parsed,
2684 we shall deal with some flags for changing compiler settings. */
2685
2686 /* At first, we check if we have to strictly
2687 set some flags based on ISA family. */
2688 if (TARGET_ISA_V2)
2689 {
2690 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2691 target_flags &= ~MASK_V3PUSH;
2692 }
2693 if (TARGET_ISA_V3)
2694 {
2695 /* Under V3 ISA, currently nothing should be strictly set. */
2696 }
2697 if (TARGET_ISA_V3M)
2698 {
2699 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2700 target_flags |= MASK_REDUCED_REGS;
2701 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
2702 target_flags &= ~MASK_PERF_EXT;
2703 }
2704
2705 /* See if we are using reduced-set registers:
2706 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2707 If so, we must forbid using $r11~$r14, $r16~$r27. */
2708 if (TARGET_REDUCED_REGS)
2709 {
2710 int r;
2711
2712 /* Prevent register allocator from
2713 choosing it as doing register allocation. */
2714 for (r = 11; r <= 14; r++)
2715 fixed_regs[r] = call_used_regs[r] = 1;
2716 for (r = 16; r <= 27; r++)
2717 fixed_regs[r] = call_used_regs[r] = 1;
2718 }
2719
2720 if (!TARGET_16_BIT)
2721 {
2722 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
2723 target_flags &= ~MASK_V3PUSH;
2724 }
2725
2726 /* Currently, we don't support PIC code generation yet. */
2727 if (flag_pic)
2728 sorry ("not support -fpic");
2729 }
2730
2731 \f
2732 /* Miscellaneous Parameters. */
2733
/* Implement TARGET_INIT_BUILTINS.  Thin wrapper: builtin registration
   lives in nds32_init_builtins_impl() (nds32-intrinsic.c).  */
static void
nds32_init_builtins (void)
{
  nds32_init_builtins_impl ();
}
2739
/* Implement TARGET_EXPAND_BUILTIN.  Thin wrapper: builtin expansion
   lives in nds32_expand_builtin_impl() (nds32-intrinsic.c).  */
static rtx
nds32_expand_builtin (tree exp,
		      rtx target,
		      rtx subtarget,
		      machine_mode mode,
		      int ignore)
{
  return nds32_expand_builtin_impl (exp, target, subtarget, mode, ignore);
}
2749
2750
2751 /* ------------------------------------------------------------------------ */
2752
2753 /* PART 4: Implemet extern function definitions,
2754 the prototype is in nds32-protos.h. */
2755 \f
2756 /* Defining Data Structures for Per-function Information. */
2757
/* Set up per-function machinery: register the callback that allocates
   and initializes the machine-specific part of cfun.  */
void
nds32_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = nds32_init_machine_status;
}
2764
2765 \f
2766 /* Register Usage. */
2767
2768 /* -- How Values Fit in Registers. */
2769
2770 int
2771 nds32_hard_regno_nregs (int regno ATTRIBUTE_UNUSED,
2772 machine_mode mode)
2773 {
2774 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
2775 }
2776
2777 int
2778 nds32_hard_regno_mode_ok (int regno, machine_mode mode)
2779 {
2780 /* Restrict double-word quantities to even register pairs. */
2781 if (HARD_REGNO_NREGS (regno, mode) == 1
2782 || !((regno) & 1))
2783 return 1;
2784
2785 return 0;
2786 }
2787
2788 \f
2789 /* Register Classes. */
2790
2791 enum reg_class
2792 nds32_regno_reg_class (int regno)
2793 {
2794 /* Refer to nds32.h for more register class details. */
2795
2796 if (regno >= 0 && regno <= 7)
2797 return LOW_REGS;
2798 else if (regno >= 8 && regno <= 11)
2799 return MIDDLE_REGS;
2800 else if (regno >= 12 && regno <= 14)
2801 return HIGH_REGS;
2802 else if (regno == 15)
2803 return R15_TA_REG;
2804 else if (regno >= 16 && regno <= 19)
2805 return MIDDLE_REGS;
2806 else if (regno >= 20 && regno <= 31)
2807 return HIGH_REGS;
2808 else if (regno == 32 || regno == 33)
2809 return FRAME_REGS;
2810 else
2811 return NO_REGS;
2812 }
2813
2814 \f
2815 /* Stack Layout and Calling Conventions. */
2816
2817 /* -- Basic Stack Layout. */
2818
2819 rtx
2820 nds32_return_addr_rtx (int count,
2821 rtx frameaddr ATTRIBUTE_UNUSED)
2822 {
2823 /* There is no way to determine the return address
2824 if frameaddr is the frame that has 'count' steps
2825 up from current frame. */
2826 if (count != 0)
2827 return NULL_RTX;
2828
2829 /* If count == 0, it means we are at current frame,
2830 the return address is $r30 ($lp). */
2831 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
2832 }
2833
2834 /* -- Eliminating Frame Pointer and Arg Pointer. */
2835
2836 HOST_WIDE_INT
2837 nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
2838 {
2839 HOST_WIDE_INT offset;
2840
2841 /* Compute and setup stack frame size.
2842 The result will be in cfun->machine. */
2843 nds32_compute_stack_frame ();
2844
2845 /* Remember to consider
2846 cfun->machine->callee_saved_area_padding_bytes
2847 when calculating offset. */
2848 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
2849 {
2850 offset = (cfun->machine->fp_size
2851 + cfun->machine->gp_size
2852 + cfun->machine->lp_size
2853 + cfun->machine->callee_saved_gpr_regs_size
2854 + cfun->machine->callee_saved_area_gpr_padding_bytes
2855 + cfun->machine->local_size
2856 + cfun->machine->out_args_size);
2857 }
2858 else if (from_reg == ARG_POINTER_REGNUM
2859 && to_reg == HARD_FRAME_POINTER_REGNUM)
2860 {
2861 offset = 0;
2862 }
2863 else if (from_reg == FRAME_POINTER_REGNUM
2864 && to_reg == STACK_POINTER_REGNUM)
2865 {
2866 offset = (cfun->machine->local_size + cfun->machine->out_args_size);
2867 }
2868 else if (from_reg == FRAME_POINTER_REGNUM
2869 && to_reg == HARD_FRAME_POINTER_REGNUM)
2870 {
2871 offset = (-1) * (cfun->machine->fp_size
2872 + cfun->machine->gp_size
2873 + cfun->machine->lp_size
2874 + cfun->machine->callee_saved_gpr_regs_size
2875 + cfun->machine->callee_saved_area_gpr_padding_bytes);
2876 }
2877 else
2878 {
2879 gcc_unreachable ();
2880 }
2881
2882 return offset;
2883 }
2884
2885 /* -- Passing Arguments in Registers. */
2886
/* Implement INIT_CUMULATIVE_ARGS.  Reset the cumulative argument state
   so that argument passing starts from the first available GPR.  */
void
nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
			    tree fntype ATTRIBUTE_UNUSED,
			    rtx libname ATTRIBUTE_UNUSED,
			    tree fndecl ATTRIBUTE_UNUSED,
			    int n_named_args ATTRIBUTE_UNUSED)
{
  /* Initial available registers
     (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
     for passing arguments.  */
  cum->gpr_offset = 0;
}
2899
2900 /* -- Function Entry and Exit. */
2901
2902 /* Function for normal multiple push prologue. */
2903 void
2904 nds32_expand_prologue (void)
2905 {
2906 int fp_adjust;
2907 int sp_adjust;
2908 int en4_const;
2909
2910 rtx Rb, Re;
2911 rtx fp_adjust_insn, sp_adjust_insn;
2912
2913 /* Compute and setup stack frame size.
2914 The result will be in cfun->machine. */
2915 nds32_compute_stack_frame ();
2916
2917 /* If this is a variadic function, first we need to push argument
2918 registers that hold the unnamed argument value. */
2919 if (cfun->machine->va_args_size != 0)
2920 {
2921 Rb = gen_rtx_REG (SImode, cfun->machine->va_args_first_regno);
2922 Re = gen_rtx_REG (SImode, cfun->machine->va_args_last_regno);
2923 /* No need to push $fp, $gp, or $lp, so use GEN_INT(0). */
2924 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (0), true);
2925
2926 /* We may also need to adjust stack pointer for padding bytes
2927 because varargs may cause $sp not 8-byte aligned. */
2928 if (cfun->machine->va_args_area_padding_bytes)
2929 {
2930 /* Generate sp adjustment instruction. */
2931 sp_adjust = cfun->machine->va_args_area_padding_bytes;
2932 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2933 stack_pointer_rtx,
2934 GEN_INT (-1 * sp_adjust));
2935
2936 /* Emit rtx into instructions list and receive INSN rtx form. */
2937 sp_adjust_insn = emit_insn (sp_adjust_insn);
2938
2939 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2940 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2941 generate CFI (Call Frame Information) stuff. */
2942 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
2943 }
2944 }
2945
2946 /* If the function is 'naked',
2947 we do not have to generate prologue code fragment. */
2948 if (cfun->machine->naked_p)
2949 return;
2950
2951 /* Get callee_first_regno and callee_last_regno. */
2952 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
2953 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);
2954
2955 /* nds32_emit_stack_push_multiple(first_regno, last_regno),
2956 the pattern 'stack_push_multiple' is implemented in nds32.md.
2957 For En4 field, we have to calculate its constant value.
2958 Refer to Andes ISA for more information. */
2959 en4_const = 0;
2960 if (cfun->machine->fp_size)
2961 en4_const += 8;
2962 if (cfun->machine->gp_size)
2963 en4_const += 4;
2964 if (cfun->machine->lp_size)
2965 en4_const += 2;
2966
2967 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2968 to be saved, we don't have to create multiple push instruction.
2969 Otherwise, a multiple push instruction is needed. */
2970 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
2971 {
2972 /* Create multiple push instruction rtx. */
2973 nds32_emit_stack_push_multiple (Rb, Re, GEN_INT (en4_const), false);
2974 }
2975
2976 /* Check frame_pointer_needed to see
2977 if we shall emit fp adjustment instruction. */
2978 if (frame_pointer_needed)
2979 {
2980 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2981 + (4 * callee-saved-registers)
2982 Note: No need to adjust
2983 cfun->machine->callee_saved_area_padding_bytes,
2984 because, at this point, stack pointer is just
2985 at the position after push instruction. */
2986 fp_adjust = cfun->machine->fp_size
2987 + cfun->machine->gp_size
2988 + cfun->machine->lp_size
2989 + cfun->machine->callee_saved_gpr_regs_size;
2990 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2991 stack_pointer_rtx,
2992 GEN_INT (fp_adjust));
2993 /* Emit rtx into instructions list and receive INSN rtx form. */
2994 fp_adjust_insn = emit_insn (fp_adjust_insn);
2995
2996 /* The insn rtx 'fp_adjust_insn' will change frame layout. */
2997 RTX_FRAME_RELATED_P (fp_adjust_insn) = 1;
2998 }
2999
3000 /* Adjust $sp = $sp - local_size - out_args_size
3001 - callee_saved_area_padding_bytes. */
3002 sp_adjust = cfun->machine->local_size
3003 + cfun->machine->out_args_size
3004 + cfun->machine->callee_saved_area_gpr_padding_bytes;
3005 /* sp_adjust value may be out of range of the addi instruction,
3006 create alternative add behavior with TA_REGNUM if necessary,
3007 using NEGATIVE value to tell that we are decreasing address. */
3008 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
3009 if (sp_adjust)
3010 {
3011 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
3012 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3013 stack_pointer_rtx,
3014 GEN_INT (-1 * sp_adjust));
3015 /* Emit rtx into instructions list and receive INSN rtx form. */
3016 sp_adjust_insn = emit_insn (sp_adjust_insn);
3017
3018 /* The insn rtx 'sp_adjust_insn' will change frame layout.
3019 We need to use RTX_FRAME_RELATED_P so that GCC is able to
3020 generate CFI (Call Frame Information) stuff. */
3021 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
3022 }
3023
3024 /* Prevent the instruction scheduler from
3025 moving instructions across the boundary. */
3026 emit_insn (gen_blockage ());
3027 }
3028
/* Function for normal multiple pop epilogue.

   Emit RTL that undoes the multiple-push prologue: restore $sp across
   the local/outgoing-args area (from $fp when available, otherwise by
   an explicit adjustment), pop the callee-saved registers together
   with $fp/$gp/$lp as recorded in cfun->machine, unwind any varargs
   register-save area, and finally emit the return instruction unless
   SIBCALL_P is true (a sibling call follows instead).  */
void
nds32_expand_epilogue (bool sibcall_p)
{
  int sp_adjust;
  int en4_const;

  rtx Rb, Re;
  rtx sp_adjust_insn;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.
     However, if this function is also a variadic function,
     we need to create adjust stack pointer before 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* If this is a variadic function, we do not have to restore argument
	 registers but need to adjust stack pointer back to previous stack
	 frame location before return.  */
      if (cfun->machine->va_args_size != 0)
	{
	  /* Generate sp adjustment instruction.
	     We need to consider padding bytes here.  */
	  sp_adjust = cfun->machine->va_args_size
		      + cfun->machine->va_args_area_padding_bytes;
	  sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				       stack_pointer_rtx,
				       GEN_INT (sp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  sp_adjust_insn = emit_insn (sp_adjust_insn);

	  /* The insn rtx 'sp_adjust_insn' will change frame layout.
	     We need to use RTX_FRAME_RELATED_P so that GCC is able to
	     generate CFI (Call Frame Information) stuff.  */
	  RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
	}

      /* Generate return instruction by using 'return_internal' pattern.
	 Make sure this instruction is after gen_blockage().  */
      if (!sibcall_p)
	emit_jump_insn (gen_return_internal ());
      return;
    }

  if (frame_pointer_needed)
    {
      /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
			  - (4 * callee-saved-registers)
	 Note: No need to adjust
	       cfun->machine->callee_saved_area_padding_bytes,
	       because we want to adjust stack pointer
	       to the position for pop instruction.  */
      sp_adjust = cfun->machine->fp_size
		  + cfun->machine->gp_size
		  + cfun->machine->lp_size
		  + cfun->machine->callee_saved_gpr_regs_size;
      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				   hard_frame_pointer_rtx,
				   GEN_INT (-1 * sp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      sp_adjust_insn = emit_insn (sp_adjust_insn);

      /* The insn rtx 'sp_adjust_insn' will change frame layout.  */
      RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
    }
  else
    {
      /* If frame pointer is NOT needed,
	 we cannot calculate the sp adjustment from frame pointer.
	 Instead, we calculate the adjustment by local_size,
	 out_args_size, and callee_saved_area_padding_bytes.
	 Notice that such sp adjustment value may be out of range,
	 so we have to deal with it as well.  */

      /* Adjust $sp = $sp + local_size + out_args_size
			  + callee_saved_area_padding_bytes.  */
      sp_adjust = cfun->machine->local_size
		  + cfun->machine->out_args_size
		  + cfun->machine->callee_saved_area_gpr_padding_bytes;
      /* sp_adjust value may be out of range of the addi instruction,
	 create alternative add behavior with TA_REGNUM if necessary,
	 using POSITIVE value to tell that we are increasing address.  */
      sp_adjust = nds32_force_addi_stack_int (sp_adjust);
      if (sp_adjust)
	{
	  /* Generate sp adjustment instruction
	     if and only if sp_adjust != 0.  */
	  sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				       stack_pointer_rtx,
				       GEN_INT (sp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  sp_adjust_insn = emit_insn (sp_adjust_insn);

	  /* The insn rtx 'sp_adjust_insn' will change frame layout.  */
	  RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
	}
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);

  /* nds32_emit_stack_pop_multiple(first_regno, last_regno),
     the pattern 'stack_pop_multiple' is implemented in nds32.md.
     For En4 field, we have to calculate its constant value.
     Refer to Andes ISA for more information.  */
  en4_const = 0;
  if (cfun->machine->fp_size)
    en4_const += 8;
  if (cfun->machine->gp_size)
    en4_const += 4;
  if (cfun->machine->lp_size)
    en4_const += 2;

  /* If $fp, $gp, $lp, and all callee-save registers are NOT required
     to be saved, we don't have to create multiple pop instruction.
     Otherwise, a multiple pop instruction is needed.  */
  if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
    {
      /* Create multiple pop instruction rtx.  */
      nds32_emit_stack_pop_multiple (Rb, Re, GEN_INT (en4_const));
    }

  /* If this is a variadic function, we do not have to restore argument
     registers but need to adjust stack pointer back to previous stack
     frame location before return.  */
  if (cfun->machine->va_args_size != 0)
    {
      /* Generate sp adjustment instruction.
	 We need to consider padding bytes here.  */
      sp_adjust = cfun->machine->va_args_size
		  + cfun->machine->va_args_area_padding_bytes;
      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				   stack_pointer_rtx,
				   GEN_INT (sp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      sp_adjust_insn = emit_insn (sp_adjust_insn);

      /* The insn rtx 'sp_adjust_insn' will change frame layout.
	 We need to use RTX_FRAME_RELATED_P so that GCC is able to
	 generate CFI (Call Frame Information) stuff.  */
      RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
    }

  /* Generate return instruction.  */
  if (!sibcall_p)
    emit_jump_insn (gen_return_internal ());
}
3185
/* Function for v3push prologue.

   The v3push ('push25') instruction pushes { $fp $gp $lp } plus the
   callee-saved registers and can simultaneously decrement $sp by an
   8-byte-aligned imm8u.  When the required local/outgoing adjustment
   fits that immediate we fold it into the push; otherwise we emit
   'push25 Re,0' followed by a separate $sp adjustment (possibly via
   TA_REGNUM for out-of-range values).  */
void
nds32_expand_prologue_v3push (void)
{
  int fp_adjust;
  int sp_adjust;

  rtx Rb, Re;
  rtx fp_adjust_insn, sp_adjust_insn;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* If the function is 'naked',
     we do not have to generate prologue code fragment.  */
  if (cfun->machine->naked_p)
    return;

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);

  /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
	      + cfun->machine->out_args_size
	      + cfun->machine->callee_saved_area_gpr_padding_bytes;

  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
    {
      /* We can use 'push25 Re,imm8u'.  */

      /* nds32_emit_stack_v3push(last_regno, sp_adjust),
	 the pattern 'stack_v3push' is implemented in nds32.md.
	 The (const_int 14) means v3push always push { $fp $gp $lp }.  */
      nds32_emit_stack_v3push (Rb, Re,
			       GEN_INT (14), GEN_INT (sp_adjust));

      /* Check frame_pointer_needed to see
	 if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
	{
	  /* adjust $fp = $sp + 4         ($fp size)
			      + 4         ($gp size)
			      + 4         ($lp size)
			      + (4 * n)   (callee-saved registers)
			      + sp_adjust ('push25 Re,imm8u')
	     Note: Since we use 'push25 Re,imm8u',
		   the position of stack pointer is further
		   changed after push instruction.
		   Hence, we need to take sp_adjust value
		   into consideration.  */
	  fp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size
		      + sp_adjust;
	  fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
				       stack_pointer_rtx,
				       GEN_INT (fp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  fp_adjust_insn = emit_insn (fp_adjust_insn);
	}
    }
  else
    {
      /* We have to use 'push25 Re,0' and
	 expand one more instruction to adjust $sp later.  */

      /* nds32_emit_stack_v3push(last_regno, sp_adjust),
	 the pattern 'stack_v3push' is implemented in nds32.md.
	 The (const_int 14) means v3push always push { $fp $gp $lp }.  */
      nds32_emit_stack_v3push (Rb, Re,
			       GEN_INT (14), GEN_INT (0));

      /* Check frame_pointer_needed to see
	 if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
	{
	  /* adjust $fp = $sp + 4        ($fp size)
			      + 4        ($gp size)
			      + 4        ($lp size)
			      + (4 * n)  (callee-saved registers)
	     Note: Since we use 'push25 Re,0',
		   the stack pointer is just at the position
		   after push instruction.
		   No need to take sp_adjust into consideration.  */
	  fp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size;
	  fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
				       stack_pointer_rtx,
				       GEN_INT (fp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  fp_adjust_insn = emit_insn (fp_adjust_insn);
	}

      /* Because we use 'push25 Re,0',
	 we need to expand one more instruction to adjust $sp.
	 However, sp_adjust value may be out of range of the addi instruction,
	 create alternative add behavior with TA_REGNUM if necessary,
	 using NEGATIVE value to tell that we are decreasing address.  */
      sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
      if (sp_adjust)
	{
	  /* Generate sp adjustment instruction
	     if and only if sp_adjust != 0.  */
	  sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				       stack_pointer_rtx,
				       GEN_INT (-1 * sp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  sp_adjust_insn = emit_insn (sp_adjust_insn);

	  /* The insn rtx 'sp_adjust_insn' will change frame layout.
	     We need to use RTX_FRAME_RELATED_P so that GCC is able to
	     generate CFI (Call Frame Information) stuff.  */
	  RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
	}
    }

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());
}
3313
/* Function for v3pop epilogue.

   Mirror of nds32_expand_prologue_v3push: use 'pop25 Re,imm8u' to pop
   { $fp $gp $lp } plus callee-saved registers and fold the $sp
   re-adjustment into the pop when the value fits and the function does
   not call alloca; otherwise adjust $sp first and use 'pop25 Re,0'.

   NOTE(review): unlike nds32_expand_epilogue, the final
   gen_pop25return () here is emitted without checking SIBCALL_P;
   presumably v3pop epilogues are never combined with sibling calls in
   this configuration -- confirm against the sibcall predicates.  */
void
nds32_expand_epilogue_v3pop (bool sibcall_p)
{
  int sp_adjust;

  rtx Rb, Re;
  rtx sp_adjust_insn;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* Generate return instruction by using 'return_internal' pattern.
	 Make sure this instruction is after gen_blockage().  */
      if (!sibcall_p)
	emit_jump_insn (gen_return_internal ());
      return;
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_first_gpr_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_last_gpr_regno);

  /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
	      + cfun->machine->out_args_size
	      + cfun->machine->callee_saved_area_gpr_padding_bytes;

  /* We have to consider alloca issue as well.
     If the function does call alloca(), the stack pointer is not fixed.
     In that case, we cannot use 'pop25 Re,imm8u' directly.
     We have to calculate stack pointer from frame pointer
     and then use 'pop25 Re,0'.
     Of course, the frame_pointer_needed should be nonzero
     if the function calls alloca().  */
  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
      && !cfun->calls_alloca)
    {
      /* We can use 'pop25 Re,imm8u'.  */

      /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
	 the pattern 'stack_v3pop' is implemented in nds32.md.
	 The (const_int 14) means v3pop always pop { $fp $gp $lp }.  */
      nds32_emit_stack_v3pop (Rb, Re,
			      GEN_INT (14), GEN_INT (sp_adjust));
    }
  else
    {
      /* We have to use 'pop25 Re,0', and prior to it,
	 we must expand one more instruction to adjust $sp.  */

      if (frame_pointer_needed)
	{
	  /* adjust $sp = $fp - 4        ($fp size)
			      - 4        ($gp size)
			      - 4        ($lp size)
			      - (4 * n)  (callee-saved registers)
	     Note: No need to adjust
		   cfun->machine->callee_saved_area_padding_bytes,
		   because we want to adjust stack pointer
		   to the position for pop instruction.  */
	  sp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size;
	  sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				       hard_frame_pointer_rtx,
				       GEN_INT (-1 * sp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  sp_adjust_insn = emit_insn (sp_adjust_insn);
	}
      else
	{
	  /* If frame pointer is NOT needed,
	     we cannot calculate the sp adjustment from frame pointer.
	     Instead, we calculate the adjustment by local_size,
	     out_args_size, and callee_saved_area_padding_bytes.
	     Notice that such sp adjustment value may be out of range,
	     so we have to deal with it as well.  */

	  /* Adjust $sp = $sp + local_size + out_args_size
			      + callee_saved_area_padding_bytes.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size
		      + cfun->machine->callee_saved_area_gpr_padding_bytes;
	  /* sp_adjust value may be out of range of the addi instruction,
	     create alternative add behavior with TA_REGNUM if necessary,
	     using POSITIVE value to tell that we are increasing address.  */
	  sp_adjust = nds32_force_addi_stack_int (sp_adjust);
	  if (sp_adjust)
	    {
	      /* Generate sp adjustment instruction
		 if and only if sp_adjust != 0.  */
	      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
					   stack_pointer_rtx,
					   GEN_INT (sp_adjust));
	      /* Emit rtx into instructions list and receive INSN rtx form.  */
	      sp_adjust_insn = emit_insn (sp_adjust_insn);
	    }
	}

      /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
	 the pattern 'stack_v3pop' is implemented in nds32.md.  */
      /* The (const_int 14) means v3pop always pop { $fp $gp $lp }.  */
      nds32_emit_stack_v3pop (Rb, Re,
			      GEN_INT (14), GEN_INT (0));
    }

  /* Generate return instruction.  */
  emit_jump_insn (gen_pop25return ());
}
3436
3437 /* Return nonzero if this function is known to have a null epilogue.
3438 This allows the optimizer to omit jumps to jumps if no stack
3439 was created. */
3440 int
3441 nds32_can_use_return_insn (void)
3442 {
3443 /* Prior to reloading, we can't tell how many registers must be saved.
3444 Thus we can not determine whether this function has null epilogue. */
3445 if (!reload_completed)
3446 return 0;
3447
3448 /* If no stack was created, two conditions must be satisfied:
3449 1. This is a naked function.
3450 So there is no callee-saved, local size, or outgoing size.
3451 2. This is NOT a variadic function.
3452 So there is no pushing arguement registers into the stack. */
3453 return (cfun->machine->naked_p && (cfun->machine->va_args_size == 0));
3454 }
3455
3456 /* ------------------------------------------------------------------------ */
3457
3458 /* Function to test 333-form for load/store instructions.
3459 This is auxiliary extern function for auxiliary macro in nds32.h.
3460 Because it is a little complicated, we use function instead of macro. */
3461 bool
3462 nds32_ls_333_p (rtx rt, rtx ra, rtx imm, machine_mode mode)
3463 {
3464 if (REGNO_REG_CLASS (REGNO (rt)) == LOW_REGS
3465 && REGNO_REG_CLASS (REGNO (ra)) == LOW_REGS)
3466 {
3467 if (GET_MODE_SIZE (mode) == 4)
3468 return satisfies_constraint_Iu05 (imm);
3469
3470 if (GET_MODE_SIZE (mode) == 2)
3471 return satisfies_constraint_Iu04 (imm);
3472
3473 if (GET_MODE_SIZE (mode) == 1)
3474 return satisfies_constraint_Iu03 (imm);
3475 }
3476
3477 return false;
3478 }
3479
3480
3481 /* Computing the Length of an Insn.
3482 Modifies the length assigned to instruction INSN.
3483 LEN is the initially computed length of the insn. */
3484 int
3485 nds32_adjust_insn_length (rtx_insn *insn, int length)
3486 {
3487 rtx src, dst;
3488
3489 switch (recog_memoized (insn))
3490 {
3491 case CODE_FOR_move_df:
3492 case CODE_FOR_move_di:
3493 /* Adjust length of movd44 to 2. */
3494 src = XEXP (PATTERN (insn), 1);
3495 dst = XEXP (PATTERN (insn), 0);
3496
3497 if (REG_P (src)
3498 && REG_P (dst)
3499 && (REGNO (src) % 2) == 0
3500 && (REGNO (dst) % 2) == 0)
3501 length = 2;
3502 break;
3503
3504 default:
3505 break;
3506 }
3507
3508 return length;
3509 }
3510
3511
3512 /* Return align 2 (log base 2) if the next instruction of LABEL is 4 byte. */
3513 int
3514 nds32_target_alignment (rtx label)
3515 {
3516 rtx_insn *insn;
3517
3518 if (optimize_size)
3519 return 0;
3520
3521 insn = next_active_insn (label);
3522
3523 if (insn == 0)
3524 return 0;
3525 else if ((get_attr_length (insn) % 4) == 0)
3526 return 2;
3527 else
3528 return 0;
3529 }
3530
3531 /* ------------------------------------------------------------------------ */
3532
/* PART 5: Initialize target hook structure and definitions.

   Each "#undef TARGET_xxx / #define TARGET_xxx" pair below overrides
   the corresponding default target hook with an nds32-specific
   implementation (or a suitable generic default); TARGET_INITIALIZER
   at the bottom collects all of them into the 'targetm' structure.
   The section headings mirror the chapter layout of the GCC internals
   manual; headings with no macros underneath intentionally use the
   defaults.  */

/* Controlling the Compilation Driver.  */


/* Run-time Target Specification.  */


/* Defining Data Structures for Per-function Information.  */


/* Storage Layout.  */

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE \
  default_promote_function_mode_always_promote


/* Layout of Source Language Data Types.  */


/* Register Usage.  */

/* -- Basic Characteristics of Registers.  */

/* -- Order of Allocation of Registers.  */

/* -- How Values Fit in Registers.  */

/* -- Handling Leaf Functions.  */

/* -- Registers That Form a Stack.  */


/* Register Classes.  */

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_true

#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY nds32_register_priority


/* Obsolete Macros for Defining Constraints.  */


/* Stack Layout and Calling Conventions.  */

/* -- Basic Stack Layout.  */

/* -- Exception Handling Support.  */

/* -- Specifying How Stack Checking is Done.  */

/* -- Registers That Address the Stack Frame.  */

/* -- Eliminating Frame Pointer and Arg Pointer.  */

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE nds32_can_eliminate

/* -- Passing Function Arguments on the Stack.  */

/* -- Passing Arguments in Registers.  */

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG nds32_function_arg

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack

#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance

#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary

/* -- How Scalar Function Values Are Returned.  */

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE nds32_function_value

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE nds32_libcall_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p

/* -- How Large Values Are Returned.  */

/* -- Caller-Saves Register Allocation.  */

/* -- Function Entry and Exit.  */

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue

#undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
#define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* -- Generating Code for Profiling.  */

/* -- Permitting tail calls.  */

#undef TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN nds32_warn_func_return

/* Stack smashing protection.  */


/* Implementing the Varargs Macros.  */

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs

#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming


/* Trampolines for Nested Functions.  */

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT nds32_trampoline_init


/* Implicit Calls to Library Routines.  */


/* Addressing Modes.  */

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p


/* Anchored Addresses.  */


/* Condition Code Status.  */

/* -- Representation of condition codes using (cc0).  */

/* -- Representation of condition codes using registers.  */

/* -- Macros to control conditional execution.  */


/* Describing Relative Costs of Operations.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST nds32_register_move_cost

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS nds32_rtx_costs

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST nds32_address_cost


/* Adjusting the Instruction Scheduler.  */


/* Dividing the Output into Sections (Texts, Data, . . . ).  */

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info


/* Position Independent Code.  */


/* Defining the Output Assembler Language.  */

/* -- The Overall Framework of an Assembler File.  */

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START nds32_asm_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END nds32_asm_file_end

/* -- Output of Data.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

/* -- Output of Uninitialized Variables.  */

/* -- Output and Generation of Labels.  */

#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label

/* -- How Initialization Functions Are Handled.  */

/* -- Macros Controlling Initialization Routines.  */

/* -- Output of Assembler Instructions.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND nds32_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address

/* -- Output of Dispatch Tables.  */

/* -- Assembler Commands for Exception Regions.  */

/* -- Assembler Commands for Alignment.  */


/* Controlling Debugging Information Format.  */

/* -- Macros Affecting All Debugging Formats.  */

/* -- Specific Options for DBX Output.  */

/* -- Open-Ended Hooks for DBX Format.  */

/* -- File Names in DBX Format.  */

/* -- Macros for SDB and DWARF Output.  */

/* -- Macros for VMS Debug Format.  */


/* Cross Compilation and Floating Point.  */


/* Mode Switching Instructions.  */


/* Defining target-specific uses of __attribute__.  */

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE nds32_attribute_table

#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes

#undef TARGET_OPTION_PRAGMA_PARSE
#define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE nds32_option_override


/* Emulating TLS.  */


/* Defining coprocessor specifics for MIPS targets.  */


/* Parameters for Precompiled Header Validity Checking.  */


/* C++ ABI parameters.  */


/* Adding support for named address spaces.  */


/* Miscellaneous Parameters.  */

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS nds32_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN nds32_expand_builtin


/* ------------------------------------------------------------------------ */

/* Initialize the GCC target structure.  TARGET_INITIALIZER expands to
   an aggregate initializer built from all the TARGET_xxx macros
   defined above (see target-def.h).  */

struct gcc_target targetm = TARGET_INITIALIZER;

/* ------------------------------------------------------------------------ */