]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/nds32/nds32.c
* expr.h: Remove prototypes of functions defined in builtins.c.
[thirdparty/gcc.git] / gcc / config / nds32 / nds32.c
1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2014 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "varasm.h"
30 #include "calls.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h" /* Required by recog.h. */
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h" /* For DFA state_t. */
38 #include "insn-codes.h" /* For CODE_FOR_xxx. */
39 #include "reload.h" /* For push_reload(). */
40 #include "flags.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "recog.h"
44 #include "diagnostic-core.h"
45 #include "df.h"
46 #include "tm_p.h"
47 #include "tm-constrs.h"
48 #include "optabs.h" /* For GEN_FCN. */
49 #include "target.h"
50 #include "target-def.h"
51 #include "langhooks.h" /* For add_builtin_function(). */
52 #include "ggc.h"
53 #include "builtins.h"
54
55 /* ------------------------------------------------------------------------ */
56
57 /* This file is divided into five parts:
58
59 PART 1: Auxiliary static variable definitions and
60 target hook static variable definitions.
61
62 PART 2: Auxiliary static function definitions.
63
64 PART 3: Implement target hook stuff definitions.
65
66 PART 4: Implemet extern function definitions,
67 the prototype is in nds32-protos.h.
68
69 PART 5: Initialize target hook structure and definitions. */
70
71 /* ------------------------------------------------------------------------ */
72
73 /* PART 1: Auxiliary static variable definitions and
74 target hook static variable definitions. */
75
/* Refer to nds32.h, there are maximum 73 isr vectors in nds32 architecture.
   0 for reset handler with __attribute__((reset())),
   1-8 for exception handler with __attribute__((exception(1,...,8))),
   and 9-72 for interrupt handler with __attribute__((interrupt(0,...,63))).
   We use an array to record essential information for each vector.  */
static struct nds32_isr_info nds32_isr_vectors[NDS32_N_ISR_VECTORS];

/* Define intrinsic register names.
   Please refer to nds32_intrinsic.h file, the index is corresponding to
   'enum nds32_intrinsic_registers' data type values.
   NOTE that the base value starting from 1024.  */
static const char * const nds32_intrinsic_register_names[] =
{
  "$PSW", "$IPSW", "$ITYPE", "$IPC"
};
91
/* Defining target-specific uses of __attribute__.
   This table is consumed by the TARGET_ATTRIBUTE_TABLE hook; each entry
   gives the attribute name, the allowed argument count range, and whether
   a decl/type/function-type is required for the attribute to apply.  */
static const struct attribute_spec nds32_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
               function_type_required, handler, affects_type_identity } */

  /* The interrupt vid: [0-63]+ (actual vector number starts from 9 to 72).  */
  { "interrupt",    1, 64, false, false, false, NULL, false },
  /* The exception vid: [1-8]+  (actual vector number starts from 1 to 8).  */
  { "exception",    1,  8, false, false, false, NULL, false },
  /* Argument is user's interrupt numbers.  The vector number is always 0.  */
  { "reset",        1,  1, false, false, false, NULL, false },

  /* The attributes describing isr nested type.  */
  { "nested",       0,  0, false, false, false, NULL, false },
  { "not_nested",   0,  0, false, false, false, NULL, false },
  { "nested_ready", 0,  0, false, false, false, NULL, false },

  /* The attributes describing isr register save scheme.  */
  { "save_all",     0,  0, false, false, false, NULL, false },
  { "partial_save", 0,  0, false, false, false, NULL, false },

  /* The attributes used by reset attribute.  */
  { "nmi",          1,  1, false, false, false, NULL, false },
  { "warm",         1,  1, false, false, false, NULL, false },

  /* The attribute telling no prologue/epilogue.  */
  { "naked",        0,  0, false, false, false, NULL, false },

  /* The last attribute spec is set to be NULL.  */
  { NULL,           0,  0, false, false, false, NULL, false }
};
124
125
126 /* ------------------------------------------------------------------------ */
127
128 /* PART 2: Auxiliary static function definitions. */
129
130 /* Function to save and restore machine-specific function data. */
131 static struct machine_function *
132 nds32_init_machine_status (void)
133 {
134 struct machine_function *machine;
135 machine = ggc_cleared_alloc<machine_function> ();
136
137 /* Initially assume this function needs prologue/epilogue. */
138 machine->naked_p = 0;
139
140 /* Initially assume this function does NOT use fp_as_gp optimization. */
141 machine->fp_as_gp_p = 0;
142
143 return machine;
144 }
145
/* Function to compute stack frame size and
   store into cfun->machine structure.
   This fills in va_args/local/out_args sizes, the $fp/$gp/$lp save
   sizes, the callee-saved register range and its padding, and may
   decide the function is "naked" (needs no prologue/epilogue).  */
static void
nds32_compute_stack_frame (void)
{
  int r;
  int block_size;

  /* Because nds32_compute_stack_frame() will be called from different places,
     every time we enter this function, we have to assume this function
     needs prologue/epilogue.  */
  cfun->machine->naked_p = 0;

  /* Get variadic arguments size to prepare pretend arguments and
     push them into stack at prologue.
     Currently, we do not push variadic arguments by ourself.
     We have GCC handle all the works.
     The caller will push all corresponding nameless arguments into stack,
     and the callee is able to retrieve them without problems.
     These variables are still preserved in case one day
     we would like caller passing arguments with registers.  */
  cfun->machine->va_args_size = 0;
  cfun->machine->va_args_first_regno = SP_REGNUM;
  cfun->machine->va_args_last_regno  = SP_REGNUM;

  /* Get local variables, incoming variables, and temporary variables size.
     Note that we need to make sure it is 8-byte alignment because
     there may be no padding bytes if we are using LRA.  */
  cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());

  /* Get outgoing arguments size.  */
  cfun->machine->out_args_size = crtl->outgoing_args_size;

  /* If $fp value is required to be saved on stack, it needs 4 bytes space.
     Check whether $fp is ever live.  */
  cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;

  /* If $gp value is required to be saved on stack, it needs 4 bytes space.
     Check whether we are using PIC code genration.  */
  cfun->machine->gp_size = (flag_pic) ? 4 : 0;

  /* If $lp value is required to be saved on stack, it needs 4 bytes space.
     Check whether $lp is ever live.  */
  cfun->machine->lp_size = (df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;

  /* Initially there is no padding bytes.  */
  cfun->machine->callee_saved_area_padding_bytes = 0;

  /* Calculate the bytes of saving callee-saved registers on stack.  */
  cfun->machine->callee_saved_regs_size = 0;
  cfun->machine->callee_saved_regs_first_regno = SP_REGNUM;
  cfun->machine->callee_saved_regs_last_regno  = SP_REGNUM;
  /* Currently, there is no need to check $r28~$r31
     because we will save them in another way.  */
  for (r = 0; r < 28; r++)
    {
      if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
	{
	  /* Mark the first required callee-saved register
	     (only need to set it once).
	     If first regno == SP_REGNUM, we can tell that
	     it is the first time to be here.  */
	  if (cfun->machine->callee_saved_regs_first_regno == SP_REGNUM)
	    cfun->machine->callee_saved_regs_first_regno = r;
	  /* Mark the last required callee-saved register.  */
	  cfun->machine->callee_saved_regs_last_regno = r;
	}
    }

  /* Check if this function can omit prologue/epilogue code fragment.
     If there is 'naked' attribute in this function,
     we can set 'naked_p' flag to indicate that
     we do not have to generate prologue/epilogue.
     Or, if all the following conditions succeed,
     we can set this function 'naked_p' as well:
       condition 1: first_regno == last_regno == SP_REGNUM,
                    which means we do not have to save
                    any callee-saved registers.
       condition 2: Both $lp and $fp are NOT live in this function,
                    which means we do not need to save them.
       condition 3: There is no local_size, which means
                    we do not need to adjust $sp.  */
  if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
      || (cfun->machine->callee_saved_regs_first_regno == SP_REGNUM
	  && cfun->machine->callee_saved_regs_last_regno == SP_REGNUM
	  && !df_regs_ever_live_p (FP_REGNUM)
	  && !df_regs_ever_live_p (LP_REGNUM)
	  && cfun->machine->local_size == 0))
    {
      /* Set this function 'naked_p' and
         other functions can check this flag.  */
      cfun->machine->naked_p = 1;

      /* No need to save $fp, $gp, and $lp.
         We should set these value to be zero
         so that nds32_initial_elimination_offset() can work properly.  */
      cfun->machine->fp_size = 0;
      cfun->machine->gp_size = 0;
      cfun->machine->lp_size = 0;

      /* If stack usage computation is required,
         we need to provide the static stack size.  */
      if (flag_stack_usage_info)
	current_function_static_stack_size = 0;

      /* No need to do following adjustment, return immediately.  */
      return;
    }

  /* Adjustment for v3push instructions:
     If we are using v3push (push25/pop25) instructions,
     we need to make sure Rb is $r6 and Re is
     located on $r6, $r8, $r10, or $r14.
     Some results above will be discarded and recomputed.
     Note that it is only available under V3/V3M ISA.  */
  if (TARGET_V3PUSH)
    {
      /* Recompute:
           cfun->machine->fp_size
           cfun->machine->gp_size
           cfun->machine->lp_size
           cfun->machine->callee_saved_regs_first_regno
           cfun->machine->callee_saved_regs_last_regno */

      /* For v3push instructions, $fp, $gp, and $lp are always saved.  */
      cfun->machine->fp_size = 4;
      cfun->machine->gp_size = 4;
      cfun->machine->lp_size = 4;

      /* Remember to set Rb = $r6.  */
      cfun->machine->callee_saved_regs_first_regno = 6;

      if (cfun->machine->callee_saved_regs_last_regno <= 6)
	{
	  /* Re = $r6 */
	  cfun->machine->callee_saved_regs_last_regno = 6;
	}
      else if (cfun->machine->callee_saved_regs_last_regno <= 8)
	{
	  /* Re = $r8 */
	  cfun->machine->callee_saved_regs_last_regno = 8;
	}
      else if (cfun->machine->callee_saved_regs_last_regno <= 10)
	{
	  /* Re = $r10 */
	  cfun->machine->callee_saved_regs_last_regno = 10;
	}
      else if (cfun->machine->callee_saved_regs_last_regno <= 14)
	{
	  /* Re = $r14 */
	  cfun->machine->callee_saved_regs_last_regno = 14;
	}
      else if (cfun->machine->callee_saved_regs_last_regno == SP_REGNUM)
	{
	  /* If last_regno is SP_REGNUM, which means
	     it is never changed, so set it to Re = $r6.  */
	  cfun->machine->callee_saved_regs_last_regno = 6;
	}
      else
	{
	  /* The program flow should not go here.  */
	  gcc_unreachable ();
	}
    }

  /* We have correctly set callee_saved_regs_first_regno
     and callee_saved_regs_last_regno.
     Initially, the callee_saved_regs_size is supposed to be 0.
     As long as callee_saved_regs_last_regno is not SP_REGNUM,
     we can update callee_saved_regs_size with new size.  */
  if (cfun->machine->callee_saved_regs_last_regno != SP_REGNUM)
    {
      /* Compute pushed size of callee-saved registers.  */
      cfun->machine->callee_saved_regs_size
	= 4 * (cfun->machine->callee_saved_regs_last_regno
	       - cfun->machine->callee_saved_regs_first_regno
	       + 1);
    }

  /* Important: We need to make sure that
                (va_args_size + fp_size + gp_size
                 + lp_size + callee_saved_regs_size)
                is 8-byte alignment.
                If it is not, calculate the padding bytes.  */
  block_size = cfun->machine->va_args_size
	       + cfun->machine->fp_size
	       + cfun->machine->gp_size
	       + cfun->machine->lp_size
	       + cfun->machine->callee_saved_regs_size;
  if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
    {
      cfun->machine->callee_saved_area_padding_bytes
	= NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
    }

  /* If stack usage computation is required,
     we need to provide the static stack size.  */
  if (flag_stack_usage_info)
    {
      current_function_static_stack_size
	= NDS32_ROUND_UP_DOUBLE_WORD (block_size)
	  + cfun->machine->local_size
	  + cfun->machine->out_args_size;
    }
}
351
/* Function to create a parallel rtx pattern
   which presents stack push multiple behavior.
   The overall concept are:
     "push registers to memory",
     "adjust stack pointer".
   Rb and Re are the first and last general registers to push;
   both may be SP_REGNUM, which means no general register is pushed.
   $fp/$gp/$lp are additionally pushed when the corresponding
   cfun->machine->*_size field is nonzero.  En4 is currently unused.  */
static rtx
nds32_gen_stack_push_multiple (rtx Rb, rtx Re,
			       rtx En4 ATTRIBUTE_UNUSED)
{
  int regno;
  int extra_count;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx push_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
                     (reg:SI Rb))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
                     (reg:SI Rb+1))
                ...
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
                     (reg:SI Re))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
                     (reg:SI FP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
                     (reg:SI GP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
                     (reg:SI LP_REGNUM))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int -32)))]) */

  /* Calculate the number of registers that will be pushed.  */
  extra_count = 0;
  if (cfun->machine->fp_size)
    extra_count++;
  if (cfun->machine->gp_size)
    extra_count++;
  if (cfun->machine->lp_size)
    extra_count++;
  /* Note that Rb and Re may be SP_REGNUM.  DO NOT count it in.  */
  if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
    num_use_regs = extra_count;
  else
    num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;

  /* In addition to used registers,
     we need one more space for (set sp sp-x) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
				    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create push behavior.
     The first store goes at the lowest (most negative) address.  */
  offset = -(num_use_regs * 4);

  /* Create (set mem regX) from Rb, Rb+1 up to Re.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      /* Rb and Re may be SP_REGNUM.
         We need to break this loop immediately.  */
      if (regno == SP_REGNUM)
        break;

      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary.  */
  if (cfun->machine->fp_size)
    {
      reg = gen_rtx_REG (SImode, FP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }
  if (cfun->machine->gp_size)
    {
      reg = gen_rtx_REG (SImode, GP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }
  if (cfun->machine->lp_size)
    {
      reg = gen_rtx_REG (SImode, LP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set sp sp-x).  */

  /* We need to re-calculate the offset value again for adjustment.  */
  offset = -(num_use_regs * 4);
  adjust_sp_rtx
    = gen_rtx_SET (VOIDmode,
		   stack_pointer_rtx,
		   plus_constant (Pmode, stack_pointer_rtx, offset));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
  RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;

  return parallel_insn;
}
485
/* Function to create a parallel rtx pattern
   which presents stack pop multiple behavior.
   The overall concept are:
     "pop registers from memory",
     "adjust stack pointer".
   This is the mirror of nds32_gen_stack_push_multiple: Rb and Re give
   the first and last general registers to pop (both may be SP_REGNUM),
   and $fp/$gp/$lp are restored when the corresponding
   cfun->machine->*_size field is nonzero.  En4 is currently unused.  */
static rtx
nds32_gen_stack_pop_multiple (rtx Rb, rtx Re,
			      rtx En4 ATTRIBUTE_UNUSED)
{
  int regno;
  int extra_count;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx pop_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [(set (reg:SI Rb)
                     (mem (reg:SI SP_REGNUM)))
                (set (reg:SI Rb+1)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
                ...
                (set (reg:SI Re)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
                (set (reg:SI FP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
                (set (reg:SI GP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
                (set (reg:SI LP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int 32)))]) */

  /* Calculate the number of registers that will be popped.  */
  extra_count = 0;
  if (cfun->machine->fp_size)
    extra_count++;
  if (cfun->machine->gp_size)
    extra_count++;
  if (cfun->machine->lp_size)
    extra_count++;
  /* Note that Rb and Re may be SP_REGNUM.  DO NOT count it in.  */
  if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
    num_use_regs = extra_count;
  else
    num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;

  /* In addition to used registers,
     we need one more space for (set sp sp+x) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
				    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create pop behavior.
     Unlike push, loads start at the current stack pointer.  */
  offset = 0;

  /* Create (set regX mem) from Rb, Rb+1 up to Re.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      /* Rb and Re may be SP_REGNUM.
         We need to break this loop immediately.  */
      if (regno == SP_REGNUM)
        break;

      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary.  */
  if (cfun->machine->fp_size)
    {
      reg = gen_rtx_REG (SImode, FP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }
  if (cfun->machine->gp_size)
    {
      reg = gen_rtx_REG (SImode, GP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }
  if (cfun->machine->lp_size)
    {
      reg = gen_rtx_REG (SImode, LP_REGNUM);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set sp sp+x).  */

  /* The offset value is already in place.  No need to re-calculate it.  */
  adjust_sp_rtx
    = gen_rtx_SET (VOIDmode,
		   stack_pointer_rtx,
		   plus_constant (Pmode, stack_pointer_rtx, offset));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
  RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;

  return parallel_insn;
}
618
/* Function to create a parallel rtx pattern
   which presents stack v3push behavior.
   The overall concept are:
     "push registers to memory",
     "adjust stack pointer".
   Unlike nds32_gen_stack_push_multiple, v3push unconditionally saves
   $fp, $gp, and $lp, and additionally subtracts IMM8U (a CONST_INT)
   from $sp.  En4 is currently unused.  */
static rtx
nds32_gen_stack_v3push (rtx Rb,
			rtx Re,
			rtx En4 ATTRIBUTE_UNUSED,
			rtx imm8u)
{
  int regno;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx push_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
                     (reg:SI Rb))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
                     (reg:SI Rb+1))
                ...
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
                     (reg:SI Re))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
                     (reg:SI FP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
                     (reg:SI GP_REGNUM))
                (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
                     (reg:SI LP_REGNUM))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */

  /* Calculate the number of registers that will be pushed.
     Since $fp, $gp, and $lp is always pushed with v3push instruction,
     we need to count these three registers.
     Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;

  /* In addition to used registers,
     we need one more space for (set sp sp-x-imm8u) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
				    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create push behavior.  */
  offset = -(num_use_regs * 4);

  /* Create (set mem regX) from Rb, Rb+1 up to Re.
     Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
      XVECEXP (parallel_insn, 0, par_index) = push_rtx;
      RTX_FRAME_RELATED_P (push_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set mem fp).  */
  reg = gen_rtx_REG (SImode, FP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set mem gp).  */
  reg = gen_rtx_REG (SImode, GP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set mem lp).  */
  reg = gen_rtx_REG (SImode, LP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
  XVECEXP (parallel_insn, 0, par_index) = push_rtx;
  RTX_FRAME_RELATED_P (push_rtx) = 1;
  offset = offset + 4;
  par_index++;

  /* Create (set sp sp-x-imm8u).  */

  /* We need to re-calculate the offset value again for adjustment.  */
  offset = -(num_use_regs * 4);
  adjust_sp_rtx
    = gen_rtx_SET (VOIDmode,
		   stack_pointer_rtx,
		   plus_constant (Pmode,
				  stack_pointer_rtx,
				  offset - INTVAL (imm8u)));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
  RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;

  return parallel_insn;
}
739
/* Function to create a parallel rtx pattern
   which presents stack v3pop behavior.
   The overall concept are:
     "pop registers from memory",
     "adjust stack pointer".
   Mirror of nds32_gen_stack_v3push: $fp, $gp, and $lp are always
   restored, and IMM8U (a CONST_INT) is additionally added to $sp.
   En4 is currently unused.  */
static rtx
nds32_gen_stack_v3pop (rtx Rb,
		       rtx Re,
		       rtx En4 ATTRIBUTE_UNUSED,
		       rtx imm8u)
{
  int regno;
  int num_use_regs;
  int par_index;
  int offset;

  rtx reg;
  rtx mem;
  rtx pop_rtx;
  rtx adjust_sp_rtx;
  rtx parallel_insn;

  /* We need to provide a customized rtx which contains
     necessary information for data analysis,
     so we create a parallel rtx like this:
     (parallel [(set (reg:SI Rb)
                     (mem (reg:SI SP_REGNUM)))
                (set (reg:SI Rb+1)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
                ...
                (set (reg:SI Re)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
                (set (reg:SI FP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
                (set (reg:SI GP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
                (set (reg:SI LP_REGNUM)
                     (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
                (set (reg:SI SP_REGNUM)
                     (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */

  /* Calculate the number of registers that will be popped.
     Since $fp, $gp, and $lp is always popped with v3pop instruction,
     we need to count these three registers.
     Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;

  /* In addition to used registers,
     we need one more space for (set sp sp+x+imm8u) rtx.  */
  parallel_insn = gen_rtx_PARALLEL (VOIDmode,
				    rtvec_alloc (num_use_regs + 1));
  par_index = 0;

  /* Initialize offset and start to create pop behavior.  */
  offset = 0;

  /* Create (set regX mem) from Rb, Rb+1 up to Re.
     Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
     So there is no need to worry about Rb=Re=SP_REGNUM case.  */
  for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
    {
      reg = gen_rtx_REG (SImode, regno);
      mem = gen_frame_mem (SImode, plus_constant (Pmode,
						  stack_pointer_rtx,
						  offset));
      pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
      XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
      RTX_FRAME_RELATED_P (pop_rtx) = 1;
      offset = offset + 4;
      par_index++;
    }

  /* Create (set fp mem).  */
  reg = gen_rtx_REG (SImode, FP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set gp mem).  */
  reg = gen_rtx_REG (SImode, GP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;
  /* Create (set lp mem).  */
  reg = gen_rtx_REG (SImode, LP_REGNUM);
  mem = gen_frame_mem (SImode, plus_constant (Pmode,
					      stack_pointer_rtx,
					      offset));
  pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
  XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
  RTX_FRAME_RELATED_P (pop_rtx) = 1;
  offset = offset + 4;
  par_index++;

  /* Create (set sp sp+x+imm8u).  */

  /* The offset value is already in place.  No need to re-calculate it.  */
  adjust_sp_rtx
    = gen_rtx_SET (VOIDmode,
		   stack_pointer_rtx,
		   plus_constant (Pmode,
				  stack_pointer_rtx,
				  offset + INTVAL (imm8u)));
  XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
  RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;

  return parallel_insn;
}
858
859 /* A subroutine that checks multiple load and store
860 using consecutive registers.
861 OP is a parallel rtx we would like to check.
862 LOAD_P indicates whether we are checking load operation.
863 PAR_INDEX is starting element of parallel rtx.
864 FIRST_ELT_REGNO is used to tell starting register number.
865 COUNT helps us to check consecutive register numbers. */
866 static bool
867 nds32_consecutive_registers_load_store_p (rtx op,
868 bool load_p,
869 int par_index,
870 int first_elt_regno,
871 int count)
872 {
873 int i;
874 int check_regno;
875 rtx elt;
876 rtx elt_reg;
877 rtx elt_mem;
878
879 for (i = 0; i < count; i++)
880 {
881 /* Pick up each element from parallel rtx. */
882 elt = XVECEXP (op, 0, i + par_index);
883
884 /* If this element is not a 'set' rtx, return false immediately. */
885 if (GET_CODE (elt) != SET)
886 return false;
887
888 /* Pick up reg and mem of this element. */
889 elt_reg = load_p ? SET_DEST (elt) : SET_SRC (elt);
890 elt_mem = load_p ? SET_SRC (elt) : SET_DEST (elt);
891
892 /* If elt_reg is not a expected reg rtx, return false. */
893 if (GET_CODE (elt_reg) != REG || GET_MODE (elt_reg) != SImode)
894 return false;
895 /* If elt_mem is not a expected mem rtx, return false. */
896 if (GET_CODE (elt_mem) != MEM || GET_MODE (elt_mem) != SImode)
897 return false;
898
899 /* The consecutive registers should be in (Rb,Rb+1...Re) order. */
900 check_regno = first_elt_regno + i;
901
902 /* If the register number is not continuous, return false. */
903 if (REGNO (elt_reg) != (unsigned int) check_regno)
904 return false;
905 }
906
907 return true;
908 }
909
910 /* A helper function to emit section head template. */
911 static void
912 nds32_emit_section_head_template (char section_name[],
913 char symbol_name[],
914 int align_value,
915 bool object_p)
916 {
917 const char *flags_str;
918 const char *type_str;
919
920 flags_str = (object_p) ? "\"a\"" : "\"ax\"";
921 type_str = (object_p) ? "@object" : "@function";
922
923 fprintf (asm_out_file, "\t.section\t%s, %s\n", section_name, flags_str);
924 fprintf (asm_out_file, "\t.align\t%d\n", align_value);
925 fprintf (asm_out_file, "\t.global\t%s\n", symbol_name);
926 fprintf (asm_out_file, "\t.type\t%s, %s\n", symbol_name, type_str);
927 fprintf (asm_out_file, "%s:\n", symbol_name);
928 }
929
/* A helper function to emit section tail template.
   Emits the .size directive (".size SYM, .-SYM") that closes the
   section opened by nds32_emit_section_head_template.  */
static void
nds32_emit_section_tail_template (char symbol_name[])
{
  fprintf (asm_out_file, "\t.size\t%s, .-%s\n", symbol_name, symbol_name);
}
936
/* Function to emit isr jump table section.
   VECTOR_ID selects which entry of nds32_isr_vectors[] to emit; the
   section ".nds32_jmptbl.NN" contains a single word holding the address
   of the user handler recorded for that vector.  */
static void
nds32_emit_isr_jmptbl_section (int vector_id)
{
  char section_name[100];
  char symbol_name[100];

  /* Prepare jmptbl section and symbol name.  */
  snprintf (section_name, sizeof (section_name),
            ".nds32_jmptbl.%02d", vector_id);
  snprintf (symbol_name, sizeof (symbol_name),
            "_nds32_jmptbl_%02d", vector_id);

  /* Emit a word-aligned (.align 2) data object holding the handler
     function's address.  */
  nds32_emit_section_head_template (section_name, symbol_name, 2, true);
  fprintf (asm_out_file, "\t.word\t%s\n",
           nds32_isr_vectors[vector_id].func_name);
  nds32_emit_section_tail_template (symbol_name);
}
955
/* Function to emit isr vector section.
   VECTOR_ID selects the nds32_isr_vectors[] entry.  The vector section
   either jumps directly to a first level handler (4-byte vectors) or
   saves registers and prepares a software vector id first (16-byte
   vectors).  */
static void
nds32_emit_isr_vector_section (int vector_id)
{
  unsigned int vector_number_offset = 0;
  /* The placeholder strings below are always overwritten by the
     switches that follow before being used.  */
  const char *c_str = "CATEGORY";
  const char *sr_str = "SR";
  const char *nt_str = "NT";
  const char *vs_str = "VS";
  char first_level_handler_name[100];
  char section_name[100];
  char symbol_name[100];

  /* Set the vector number offset so that we can calculate
     the value that user specifies in the attribute.
     We also prepare the category string for first level handler name.  */
  switch (nds32_isr_vectors[vector_id].category)
    {
    case NDS32_ISR_INTERRUPT:
      vector_number_offset = 9;
      c_str = "i";
      break;
    case NDS32_ISR_EXCEPTION:
      vector_number_offset = 0;
      c_str = "e";
      break;
    case NDS32_ISR_NONE:
    case NDS32_ISR_RESET:
      /* Normally it should not be here: reset is handled by
         nds32_emit_isr_reset_content() instead.  */
      gcc_unreachable ();
      break;
    }

  /* Prepare save reg string for first level handler name.  */
  switch (nds32_isr_vectors[vector_id].save_reg)
    {
    case NDS32_SAVE_ALL:
      sr_str = "sa";
      break;
    case NDS32_PARTIAL_SAVE:
      sr_str = "ps";
      break;
    }

  /* Prepare nested type string for first level handler name.  */
  switch (nds32_isr_vectors[vector_id].nested_type)
    {
    case NDS32_NESTED:
      nt_str = "ns";
      break;
    case NDS32_NOT_NESTED:
      nt_str = "nn";
      break;
    case NDS32_NESTED_READY:
      nt_str = "nr";
      break;
    }

  /* Currently we have 4-byte or 16-byte size for each vector.
     If it is 4-byte, the first level handler name has suffix string "_4b".  */
  vs_str = (nds32_isr_vector_size == 4) ? "_4b" : "";

  /* Now we can create first level handler name,
     e.g. "_nds32_i_sa_ns_4b".  */
  snprintf (first_level_handler_name, sizeof (first_level_handler_name),
            "_nds32_%s_%s_%s%s", c_str, sr_str, nt_str, vs_str);

  /* Prepare vector section and symbol name.  */
  snprintf (section_name, sizeof (section_name),
            ".nds32_vector.%02d", vector_id);
  snprintf (symbol_name, sizeof (symbol_name),
            "_nds32_vector_%02d%s", vector_id, vs_str);


  /* Everything is ready.  We can start emit vector section content.
     The alignment is log2 of the vector size (4 or 16 bytes).  */
  nds32_emit_section_head_template (section_name, symbol_name,
                                    floor_log2 (nds32_isr_vector_size), false);

  /* According to the vector size, the instructions in the
     vector section may be different.  */
  if (nds32_isr_vector_size == 4)
    {
      /* This block is for 4-byte vector size.
         Hardware $VID support is necessary and only one instruction
         is needed in vector section.  */
      fprintf (asm_out_file, "\tj\t%s ! jump to first level handler\n",
               first_level_handler_name);
    }
  else
    {
      /* This block is for 16-byte vector size.
         There is NO hardware $VID so that we need several instructions
         such as pushing GPRs and preparing software vid at vector section.
         For pushing GPRs, there are four variations for
         16-byte vector content and we have to handle each combination.
         For preparing software vid, note that the vid need to
         be subtracted vector_number_offset.  */
      if (TARGET_REDUCED_REGS)
        {
          if (nds32_isr_vectors[vector_id].save_reg == NDS32_SAVE_ALL)
            {
              /* Case of reduced set registers and save_all attribute.  */
              fprintf (asm_out_file, "\t! reduced set regs + save_all\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r15, [$sp], $r15, 0xf\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r0, [$sp], $r10, 0x0\n");

            }
          else
            {
              /* Case of reduced set registers and partial_save attribute.  */
              fprintf (asm_out_file, "\t! reduced set regs + partial_save\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r15, [$sp], $r15, 0x2\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r0, [$sp], $r5, 0x0\n");
            }
        }
      else
        {
          if (nds32_isr_vectors[vector_id].save_reg == NDS32_SAVE_ALL)
            {
              /* Case of full set registers and save_all attribute.  */
              fprintf (asm_out_file, "\t! full set regs + save_all\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r0, [$sp], $r27, 0xf\n");
            }
          else
            {
              /* Case of full set registers and partial_save attribute.  */
              fprintf (asm_out_file, "\t! full set regs + partial_save\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r15, [$sp], $r27, 0x2\n");
              fprintf (asm_out_file, "\tsmw.adm\t$r0, [$sp], $r5, 0x0\n");
            }
        }

      /* $r0 carries the software vector id into the first level
         handler; it is the raw attribute value again after the offset
         is removed.  */
      fprintf (asm_out_file, "\tmovi\t$r0, %d ! preparing software vid\n",
               vector_id - vector_number_offset);
      fprintf (asm_out_file, "\tj\t%s ! jump to first level handler\n",
               first_level_handler_name);
    }

  nds32_emit_section_tail_template (symbol_name);
}
1095
/* Function to emit isr reset handler content.
   Including all jmptbl/vector references, jmptbl section,
   vector section, nmi handler section, and warm handler section.
   All data is taken from nds32_isr_vectors[0], which describes the
   reset vector.  */
static void
nds32_emit_isr_reset_content (void)
{
  unsigned int i;
  unsigned int total_n_vectors;
  const char *vs_str;
  char reset_handler_name[100];
  char section_name[100];
  char symbol_name[100];

  total_n_vectors = nds32_isr_vectors[0].total_n_vectors;
  /* 4-byte vectors carry a "_4b" suffix on their symbol names.  */
  vs_str = (nds32_isr_vector_size == 4) ? "_4b" : "";

  fprintf (asm_out_file, "\t! RESET HANDLER CONTENT - BEGIN !\n");

  /* Create references in .rodata according to total number of vectors.
     These keep every jmptbl/vector symbol alive at link time.  */
  fprintf (asm_out_file, "\t.section\t.rodata\n");
  fprintf (asm_out_file, "\t.align\t2\n");

  /* Emit jmptbl references.  */
  fprintf (asm_out_file, "\t ! references to jmptbl section entries\n");
  for (i = 0; i < total_n_vectors; i++)
    fprintf (asm_out_file, "\t.word\t_nds32_jmptbl_%02d\n", i);

  /* Emit vector references.  */
  fprintf (asm_out_file, "\t ! references to vector section entries\n");
  for (i = 0; i < total_n_vectors; i++)
    fprintf (asm_out_file, "\t.word\t_nds32_vector_%02d%s\n", i, vs_str);

  /* Emit jmptbl_00 section: a single word holding the address of the
     reset function.  */
  snprintf (section_name, sizeof (section_name), ".nds32_jmptbl.00");
  snprintf (symbol_name, sizeof (symbol_name), "_nds32_jmptbl_00");

  fprintf (asm_out_file, "\t! ....................................\n");
  nds32_emit_section_head_template (section_name, symbol_name, 2, true);
  fprintf (asm_out_file, "\t.word\t%s\n",
           nds32_isr_vectors[0].func_name);
  nds32_emit_section_tail_template (symbol_name);

  /* Emit vector_00 section: a jump to the reset handler.  */
  snprintf (section_name, sizeof (section_name), ".nds32_vector.00");
  snprintf (symbol_name, sizeof (symbol_name), "_nds32_vector_00%s", vs_str);
  snprintf (reset_handler_name, sizeof (reset_handler_name),
            "_nds32_reset%s", vs_str);

  fprintf (asm_out_file, "\t! ....................................\n");
  nds32_emit_section_head_template (section_name, symbol_name,
                                    floor_log2 (nds32_isr_vector_size), false);
  fprintf (asm_out_file, "\tj\t%s ! jump to reset handler\n",
           reset_handler_name);
  nds32_emit_section_tail_template (symbol_name);

  /* Emit nmi handler section.  An empty recorded name means no user
     nmi handler, in which case a 0 word is emitted.  */
  snprintf (section_name, sizeof (section_name), ".nds32_nmih");
  snprintf (symbol_name, sizeof (symbol_name), "_nds32_nmih");

  fprintf (asm_out_file, "\t! ....................................\n");
  nds32_emit_section_head_template (section_name, symbol_name, 2, true);
  fprintf (asm_out_file, "\t.word\t%s\n",
           (strlen (nds32_isr_vectors[0].nmi_name) == 0)
           ? "0"
           : nds32_isr_vectors[0].nmi_name);
  nds32_emit_section_tail_template (symbol_name);

  /* Emit warm handler section, with the same 0 fallback as nmi.  */
  snprintf (section_name, sizeof (section_name), ".nds32_wrh");
  snprintf (symbol_name, sizeof (symbol_name), "_nds32_wrh");

  fprintf (asm_out_file, "\t! ....................................\n");
  nds32_emit_section_head_template (section_name, symbol_name, 2, true);
  fprintf (asm_out_file, "\t.word\t%s\n",
           (strlen (nds32_isr_vectors[0].warm_name) == 0)
           ? "0"
           : nds32_isr_vectors[0].warm_name);
  nds32_emit_section_tail_template (symbol_name);

  fprintf (asm_out_file, "\t! RESET HANDLER CONTENT - END !\n");
}
1177
1178 /* Function for nds32_merge_decl_attributes() and nds32_insert_attributes()
1179 to check if there are any conflict isr-specific attributes being set.
1180 We need to check:
1181 1. Only 'save_all' or 'partial_save' in the attributes.
1182 2. Only 'nested', 'not_nested', or 'nested_ready' in the attributes.
1183 3. Only 'interrupt', 'exception', or 'reset' in the attributes. */
1184 static void
1185 nds32_check_isr_attrs_conflict (tree func_decl, tree func_attrs)
1186 {
1187 int save_all_p, partial_save_p;
1188 int nested_p, not_nested_p, nested_ready_p;
1189 int intr_p, excp_p, reset_p;
1190
1191 /* Initialize variables. */
1192 save_all_p = partial_save_p = 0;
1193 nested_p = not_nested_p = nested_ready_p = 0;
1194 intr_p = excp_p = reset_p = 0;
1195
1196 /* We must check at MOST one attribute to set save-reg. */
1197 if (lookup_attribute ("save_all", func_attrs))
1198 save_all_p = 1;
1199 if (lookup_attribute ("partial_save", func_attrs))
1200 partial_save_p = 1;
1201
1202 if ((save_all_p + partial_save_p) > 1)
1203 error ("multiple save reg attributes to function %qD", func_decl);
1204
1205 /* We must check at MOST one attribute to set nested-type. */
1206 if (lookup_attribute ("nested", func_attrs))
1207 nested_p = 1;
1208 if (lookup_attribute ("not_nested", func_attrs))
1209 not_nested_p = 1;
1210 if (lookup_attribute ("nested_ready", func_attrs))
1211 nested_ready_p = 1;
1212
1213 if ((nested_p + not_nested_p + nested_ready_p) > 1)
1214 error ("multiple nested types attributes to function %qD", func_decl);
1215
1216 /* We must check at MOST one attribute to
1217 set interrupt/exception/reset. */
1218 if (lookup_attribute ("interrupt", func_attrs))
1219 intr_p = 1;
1220 if (lookup_attribute ("exception", func_attrs))
1221 excp_p = 1;
1222 if (lookup_attribute ("reset", func_attrs))
1223 reset_p = 1;
1224
1225 if ((intr_p + excp_p + reset_p) > 1)
1226 error ("multiple interrupt attributes to function %qD", func_decl);
1227 }
1228
/* Function to construct isr vectors information array.
   We DO NOT HAVE TO check if the attributes are valid
   because those works are supposed to be done on
   nds32_merge_decl_attributes() and nds32_insert_attributes().

   FUNC_ATTRS is the attribute list of the function named FUNC_NAME;
   matching entries of the global nds32_isr_vectors[] array are filled
   in as a side effect.  */
static void
nds32_construct_isr_vectors_information (tree func_attrs,
                                         const char *func_name)
{
  tree save_all, partial_save;
  tree nested, not_nested, nested_ready;
  tree intr, excp, reset;

  save_all = lookup_attribute ("save_all", func_attrs);
  partial_save = lookup_attribute ("partial_save", func_attrs);

  nested = lookup_attribute ("nested", func_attrs);
  not_nested = lookup_attribute ("not_nested", func_attrs);
  nested_ready = lookup_attribute ("nested_ready", func_attrs);

  intr = lookup_attribute ("interrupt", func_attrs);
  excp = lookup_attribute ("exception", func_attrs);
  reset = lookup_attribute ("reset", func_attrs);

  /* If there is no interrupt/exception/reset, we can return immediately.  */
  if (!intr && !excp && !reset)
    return;

  /* If we are here, either we have interrupt/exception,
     or reset attribute.  */
  if (intr || excp)
    {
      tree id_list;

      /* Prepare id list so that we can traverse and set vector id.
         One function may serve several vector ids.  */
      id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));

      while (id_list)
        {
          tree id;
          int vector_id;
          unsigned int vector_number_offset;

          /* The way to handle interrupt or exception is the same,
             we just need to take care of actual vector number.
             For interrupt(0..63), the actual vector number is (9..72).
             For exception(1..8), the actual vector number is (1..8).  */
          vector_number_offset = (intr) ? (9) : (0);

          /* Pick up each vector id value.  */
          id = TREE_VALUE (id_list);
          /* Add vector_number_offset to get actual vector number.  */
          vector_id = TREE_INT_CST_LOW (id) + vector_number_offset;

          /* Enable corresponding vector and set function name.
             NOTE(review): strcpy assumes func_name fits in the
             func_name buffer of nds32_isr_vectors[] -- verify the
             struct definition bounds this.  */
          nds32_isr_vectors[vector_id].category = (intr)
                                                  ? (NDS32_ISR_INTERRUPT)
                                                  : (NDS32_ISR_EXCEPTION);
          strcpy (nds32_isr_vectors[vector_id].func_name, func_name);

          /* Set register saving scheme.  */
          if (save_all)
            nds32_isr_vectors[vector_id].save_reg = NDS32_SAVE_ALL;
          else if (partial_save)
            nds32_isr_vectors[vector_id].save_reg = NDS32_PARTIAL_SAVE;

          /* Set nested type.  */
          if (nested)
            nds32_isr_vectors[vector_id].nested_type = NDS32_NESTED;
          else if (not_nested)
            nds32_isr_vectors[vector_id].nested_type = NDS32_NOT_NESTED;
          else if (nested_ready)
            nds32_isr_vectors[vector_id].nested_type = NDS32_NESTED_READY;

          /* Advance to next id.  */
          id_list = TREE_CHAIN (id_list);
        }
    }
  else
    {
      tree id_list;
      tree id;
      tree nmi, warm;

      /* Deal with reset attribute.  Its vector number is always 0.  */
      nds32_isr_vectors[0].category = NDS32_ISR_RESET;

      /* Prepare id_list and identify id value so that
         we can set total number of vectors.  The reset attribute's
         argument is the number of interrupt vectors.  */
      id_list = TREE_VALUE (reset);
      id = TREE_VALUE (id_list);

      /* The total vectors = interrupt + exception numbers + reset.
         There are 8 exception and 1 reset in nds32 architecture.  */
      nds32_isr_vectors[0].total_n_vectors = TREE_INT_CST_LOW (id) + 8 + 1;
      strcpy (nds32_isr_vectors[0].func_name, func_name);

      /* Retrieve optional nmi and warm function attributes.  */
      nmi = lookup_attribute ("nmi", func_attrs);
      warm = lookup_attribute ("warm", func_attrs);

      if (nmi != NULL_TREE)
        {
          tree nmi_func_list;
          tree nmi_func;

          nmi_func_list = TREE_VALUE (nmi);
          nmi_func = TREE_VALUE (nmi_func_list);

          /* Record nmi function name.  */
          strcpy (nds32_isr_vectors[0].nmi_name,
                  IDENTIFIER_POINTER (nmi_func));
        }

      if (warm != NULL_TREE)
        {
          tree warm_func_list;
          tree warm_func;

          warm_func_list = TREE_VALUE (warm);
          warm_func = TREE_VALUE (warm_func_list);

          /* Record warm function name.  */
          strcpy (nds32_isr_vectors[0].warm_name,
                  IDENTIFIER_POINTER (warm_func));
        }
    }
}
1356
/* Function that may create more instructions
   for large value on adjusting stack pointer.

   In nds32 target, 'addi' can be used for stack pointer
   adjustment in prologue/epilogue stage.
   However, sometimes there are too many local variables so that
   the adjustment value is not able to be fit in the 'addi' instruction.
   One solution is to move value into a register
   and then use 'add' instruction.
   In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
   Also, we need to return zero for sp adjustment so that
   prologue/epilogue knows there is no need to create 'addi' instruction.  */
static int
nds32_force_addi_stack_int (int full_value)
{
  int adjust_value;

  rtx tmp_reg;
  rtx sp_adjust_insn;

  /* Is15: the signed 15-bit immediate range of 'addi'.  */
  if (!satisfies_constraint_Is15 (GEN_INT (full_value)))
    {
      /* The value is not able to fit in single addi instruction.
         Create more instructions of moving value into a register
         and then add stack pointer with it.  */

      /* $r15 is going to be temporary register to hold the value.  */
      tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);

      /* Create one more instruction to move value
         into the temporary register.  */
      emit_move_insn (tmp_reg, GEN_INT (full_value));

      /* Create new 'add' rtx.  */
      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
                                   stack_pointer_rtx,
                                   tmp_reg);
      /* Emit rtx into insn list and receive its transformed insn rtx.  */
      sp_adjust_insn = emit_insn (sp_adjust_insn);

      /* At prologue, we need to tell GCC that this is frame related insn,
         so that we can consider this instruction to output debug information.
         If full_value is NEGATIVE, it means this function
         is invoked by expand_prologue.  */
      if (full_value < 0)
        {
          /* Because (tmp_reg <- full_value) may be split into two
             rtl patterns, we can not set its RTX_FRAME_RELATED_P.
             We need to construct another (sp <- sp + full_value)
             and then insert it into sp_adjust_insn's reg note to
             represent a frame related expression.
             GCC knows how to refer it and output debug information.  */

          rtx plus_rtx;
          rtx set_rtx;

          plus_rtx = plus_constant (Pmode, stack_pointer_rtx, full_value);
          set_rtx = gen_rtx_SET (VOIDmode, stack_pointer_rtx, plus_rtx);
          add_reg_note (sp_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);

          RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
        }

      /* We have used alternative way to adjust stack pointer value.
         Return zero so that prologue/epilogue
         will not generate other instructions.  */
      return 0;
    }
  else
    {
      /* The value is able to fit in addi instruction.
         However, remember to make it to be positive value
         because we want to return 'adjustment' result.  */
      adjust_value = (full_value < 0) ? (-full_value) : (full_value);

      return adjust_value;
    }
}
1435
1436 /* Return true if MODE/TYPE need double word alignment. */
1437 static bool
1438 nds32_needs_double_word_align (enum machine_mode mode, const_tree type)
1439 {
1440 unsigned int align;
1441
1442 /* Pick up the alignment according to the mode or type. */
1443 align = NDS32_MODE_TYPE_ALIGN (mode, type);
1444
1445 return (align > PARM_BOUNDARY);
1446 }
1447
1448 /* Return true if FUNC is a naked function. */
1449 static bool
1450 nds32_naked_function_p (tree func)
1451 {
1452 tree t;
1453
1454 if (TREE_CODE (func) != FUNCTION_DECL)
1455 abort ();
1456
1457 t = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
1458
1459 return (t != NULL_TREE);
1460 }
1461
1462 /* Function that check if 'X' is a valid address register.
1463 The variable 'STRICT' is very important to
1464 make decision for register number.
1465
1466 STRICT : true
1467 => We are in reload pass or after reload pass.
1468 The register number should be strictly limited in general registers.
1469
1470 STRICT : false
1471 => Before reload pass, we are free to use any register number. */
1472 static bool
1473 nds32_address_register_rtx_p (rtx x, bool strict)
1474 {
1475 int regno;
1476
1477 if (GET_CODE (x) != REG)
1478 return false;
1479
1480 regno = REGNO (x);
1481
1482 if (strict)
1483 return REGNO_OK_FOR_BASE_P (regno);
1484 else
1485 return true;
1486 }
1487
1488 /* Function that check if 'INDEX' is valid to be a index rtx for address.
1489
1490 OUTER_MODE : Machine mode of outer address rtx.
1491 INDEX : Check if this rtx is valid to be a index for address.
1492 STRICT : If it is true, we are in reload pass or after reload pass. */
1493 static bool
1494 nds32_legitimate_index_p (enum machine_mode outer_mode,
1495 rtx index,
1496 bool strict)
1497 {
1498 int regno;
1499 rtx op0;
1500 rtx op1;
1501
1502 switch (GET_CODE (index))
1503 {
1504 case REG:
1505 regno = REGNO (index);
1506 /* If we are in reload pass or after reload pass,
1507 we need to limit it to general register. */
1508 if (strict)
1509 return REGNO_OK_FOR_INDEX_P (regno);
1510 else
1511 return true;
1512
1513 case CONST_INT:
1514 /* The alignment of the integer value is determined by 'outer_mode'. */
1515 if (GET_MODE_SIZE (outer_mode) == 1)
1516 {
1517 /* Further check if the value is legal for the 'outer_mode'. */
1518 if (!satisfies_constraint_Is15 (index))
1519 return false;
1520
1521 /* Pass all test, the value is valid, return true. */
1522 return true;
1523 }
1524 if (GET_MODE_SIZE (outer_mode) == 2
1525 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
1526 {
1527 /* Further check if the value is legal for the 'outer_mode'. */
1528 if (!satisfies_constraint_Is16 (index))
1529 return false;
1530
1531 /* Pass all test, the value is valid, return true. */
1532 return true;
1533 }
1534 if (GET_MODE_SIZE (outer_mode) == 4
1535 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1536 {
1537 /* Further check if the value is legal for the 'outer_mode'. */
1538 if (!satisfies_constraint_Is17 (index))
1539 return false;
1540
1541 /* Pass all test, the value is valid, return true. */
1542 return true;
1543 }
1544 if (GET_MODE_SIZE (outer_mode) == 8
1545 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1546 {
1547 /* Further check if the value is legal for the 'outer_mode'. */
1548 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
1549 SImode)))
1550 return false;
1551
1552 /* Pass all test, the value is valid, return true. */
1553 return true;
1554 }
1555
1556 return false;
1557
1558 case MULT:
1559 op0 = XEXP (index, 0);
1560 op1 = XEXP (index, 1);
1561
1562 if (REG_P (op0) && CONST_INT_P (op1))
1563 {
1564 int multiplier;
1565 multiplier = INTVAL (op1);
1566
1567 /* We only allow (mult reg const_int_1)
1568 or (mult reg const_int_2) or (mult reg const_int_4). */
1569 if (multiplier != 1 && multiplier != 2 && multiplier != 4)
1570 return false;
1571
1572 regno = REGNO (op0);
1573 /* Limit it in general registers if we are
1574 in reload pass or after reload pass. */
1575 if(strict)
1576 return REGNO_OK_FOR_INDEX_P (regno);
1577 else
1578 return true;
1579 }
1580
1581 return false;
1582
1583 case ASHIFT:
1584 op0 = XEXP (index, 0);
1585 op1 = XEXP (index, 1);
1586
1587 if (REG_P (op0) && CONST_INT_P (op1))
1588 {
1589 int sv;
1590 /* op1 is already the sv value for use to do left shift. */
1591 sv = INTVAL (op1);
1592
1593 /* We only allow (ashift reg const_int_0)
1594 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1595 if (sv != 0 && sv != 1 && sv !=2)
1596 return false;
1597
1598 regno = REGNO (op0);
1599 /* Limit it in general registers if we are
1600 in reload pass or after reload pass. */
1601 if(strict)
1602 return REGNO_OK_FOR_INDEX_P (regno);
1603 else
1604 return true;
1605 }
1606
1607 return false;
1608
1609 default:
1610 return false;
1611 }
1612 }
1613
1614 /* Function to expand builtin function for
1615 '[(unspec_volatile [(reg)])]'. */
1616 static rtx
1617 nds32_expand_builtin_null_ftype_reg (enum insn_code icode,
1618 tree exp, rtx target)
1619 {
1620 /* Mapping:
1621 ops[0] <--> value0 <--> arg0 */
1622 struct expand_operand ops[1];
1623 tree arg0;
1624 rtx value0;
1625
1626 /* Grab the incoming arguments and extract its rtx. */
1627 arg0 = CALL_EXPR_ARG (exp, 0);
1628 value0 = expand_normal (arg0);
1629
1630 /* Create operands. */
1631 create_input_operand (&ops[0], value0, TYPE_MODE (TREE_TYPE (arg0)));
1632
1633 /* Emit new instruction. */
1634 if (!maybe_expand_insn (icode, 1, ops))
1635 error ("invalid argument to built-in function");
1636
1637 return target;
1638 }
1639
1640 /* Function to expand builtin function for
1641 '[(set (reg) (unspec_volatile [(imm)]))]'. */
1642 static rtx
1643 nds32_expand_builtin_reg_ftype_imm (enum insn_code icode,
1644 tree exp, rtx target)
1645 {
1646 /* Mapping:
1647 ops[0] <--> target <--> exp
1648 ops[1] <--> value0 <--> arg0 */
1649 struct expand_operand ops[2];
1650 tree arg0;
1651 rtx value0;
1652
1653 /* Grab the incoming arguments and extract its rtx. */
1654 arg0 = CALL_EXPR_ARG (exp, 0);
1655 value0 = expand_normal (arg0);
1656
1657 /* Create operands. */
1658 create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
1659 create_input_operand (&ops[1], value0, TYPE_MODE (TREE_TYPE (arg0)));
1660
1661 /* Emit new instruction. */
1662 if (!maybe_expand_insn (icode, 2, ops))
1663 error ("invalid argument to built-in function");
1664
1665 return target;
1666 }
1667
1668 /* Function to expand builtin function for
1669 '[(unspec_volatile [(reg) (imm)])]' pattern. */
1670 static rtx
1671 nds32_expand_builtin_null_ftype_reg_imm (enum insn_code icode,
1672 tree exp, rtx target)
1673 {
1674 /* Mapping:
1675 ops[0] <--> value0 <--> arg0
1676 ops[1] <--> value1 <--> arg1 */
1677 struct expand_operand ops[2];
1678 tree arg0, arg1;
1679 rtx value0, value1;
1680
1681 /* Grab the incoming arguments and extract its rtx. */
1682 arg0 = CALL_EXPR_ARG (exp, 0);
1683 arg1 = CALL_EXPR_ARG (exp, 1);
1684 value0 = expand_normal (arg0);
1685 value1 = expand_normal (arg1);
1686
1687 /* Create operands. */
1688 create_input_operand (&ops[0], value0, TYPE_MODE (TREE_TYPE (arg0)));
1689 create_input_operand (&ops[1], value1, TYPE_MODE (TREE_TYPE (arg1)));
1690
1691 /* Emit new instruction. */
1692 if (!maybe_expand_insn (icode, 2, ops))
1693 error ("invalid argument to built-in function");
1694
1695 return target;
1696 }
1697
/* A helper function to return character based on byte size:
   4 -> 'w' (word), 2 -> 'h' (half word), 1 -> 'b' (byte).  */
static char
nds32_byte_to_size (int byte)
{
  if (byte == 4)
    return 'w';
  if (byte == 2)
    return 'h';
  if (byte == 1)
    return 'b';

  /* No other access widths are expected here.  */
  gcc_unreachable ();
}
1715
/* A helper function to check if this function should contain prologue.
   Returns nonzero if any of $r0..$r27 must be saved, or if PIC code or
   a live $fp/$lp forces a frame.  */
static int
nds32_have_prologue_p (void)
{
  int i;

  /* A prologue is needed as soon as any of registers 0..27 is a
     required callee-saved register.  */
  for (i = 0; i < 28; i++)
    if (NDS32_REQUIRED_CALLEE_SAVED_P (i))
      return 1;

  return (flag_pic
          || NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
          || NDS32_REQUIRED_CALLEE_SAVED_P (LP_REGNUM));
}
1730
1731 /* ------------------------------------------------------------------------ */
1732
1733 /* PART 3: Implement target hook stuff definitions. */
1734 \f
1735 /* Register Classes. */
1736
1737 static unsigned char
1738 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1739 enum machine_mode mode)
1740 {
1741 /* Return the maximum number of consecutive registers
1742 needed to represent "mode" in a register of "rclass". */
1743 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1744 }
1745
1746 static int
1747 nds32_register_priority (int hard_regno)
1748 {
1749 /* Encourage to use r0-r7 for LRA when optimize for size. */
1750 if (optimize_size && hard_regno < 8)
1751 return 4;
1752 return 3;
1753 }
1754
1755 \f
1756 /* Stack Layout and Calling Conventions. */
1757
1758 /* There are three kinds of pointer concepts using in GCC compiler:
1759
1760 frame pointer: A pointer to the first location of local variables.
1761 stack pointer: A pointer to the top of a stack frame.
1762 argument pointer: A pointer to the incoming arguments.
1763
1764 In nds32 target calling convention, we are using 8-byte alignment.
1765 Besides, we would like to have each stack frame of a function includes:
1766
1767 [Block A]
1768 1. previous hard frame pointer
1769 2. return address
1770 3. callee-saved registers
     4. <padding bytes> (we will calculate in nds32_compute_stack_frame()
1772 and save it at
1773 cfun->machine->callee_saved_area_padding_bytes)
1774
1775 [Block B]
1776 1. local variables
1777 2. spilling location
1778 3. <padding bytes> (it will be calculated by GCC itself)
1779 4. incoming arguments
1780 5. <padding bytes> (it will be calculated by GCC itself)
1781
1782 [Block C]
1783 1. <padding bytes> (it will be calculated by GCC itself)
1784 2. outgoing arguments
1785
1786 We 'wrap' these blocks together with
1787 hard frame pointer ($r28) and stack pointer ($r31).
1788 By applying the basic frame/stack/argument pointers concept,
   the layout of a stack frame should be like this:
1790
1791 | |
1792 old stack pointer -> ----
1793 | | \
1794 | | saved arguments for
1795 | | vararg functions
1796 | | /
1797 hard frame pointer -> --
1798 & argument pointer | | \
1799 | | previous hardware frame pointer
1800 | | return address
1801 | | callee-saved registers
1802 | | /
1803 frame pointer -> --
1804 | | \
1805 | | local variables
1806 | | and incoming arguments
1807 | | /
1808 --
1809 | | \
1810 | | outgoing
1811 | | arguments
1812 | | /
1813 stack pointer -> ----
1814
1815 $SFP and $AP are used to represent frame pointer and arguments pointer,
1816 which will be both eliminated as hard frame pointer. */
1817
1818 /* -- Eliminating Frame Pointer and Arg Pointer. */
1819
1820 static bool nds32_can_eliminate (const int from_reg, const int to_reg)
1821 {
1822 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1823 return true;
1824
1825 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1826 return true;
1827
1828 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1829 return true;
1830
1831 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1832 return true;
1833
1834 return false;
1835 }
1836
1837 /* -- Passing Arguments in Registers. */
1838
/* Determine where to put an argument of MODE and TYPE:
   return a hard register rtx when it is passed in a register,
   or NULL_RTX to make the caller pass it on the stack.  */
static rtx
nds32_function_arg (cumulative_args_t ca, enum machine_mode mode,
                    const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  /* The last time this hook is called,
     it is called with MODE == VOIDmode.  */
  if (mode == VOIDmode)
    return NULL_RTX;

  /* For nameless arguments, they are passed on the stack.  */
  if (!named)
    return NULL_RTX;

  /* If there are still registers available, return it.  */
  if (NDS32_ARG_PASS_IN_REG_P (cum->reg_offset, mode, type))
    {
      /* Pick up the next available register number.  */
      unsigned int regno;

      regno = NDS32_AVAILABLE_REGNUM_FOR_ARG (cum->reg_offset, mode, type);
      return gen_rtx_REG (mode, regno);
    }
  else
    {
      /* No register available, return NULL_RTX.
         The compiler will use stack to pass argument instead.  */
      return NULL_RTX;
    }
}
1870
/* Update the cumulative argument state after laying out one argument:
   advance reg_offset past the registers just consumed.  */
static void
nds32_function_arg_advance (cumulative_args_t ca, enum machine_mode mode,
                            const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  /* Advance next register for use.
     Only named argument could be advanced;
     unnamed arguments go on the stack (see nds32_function_arg).  */
  if (named)
    {
      /* New offset = (register this argument starts at, relative to the
         first argument register) + (number of registers it occupies).  */
      cum->reg_offset
        = NDS32_AVAILABLE_REGNUM_FOR_ARG (cum->reg_offset, mode, type)
          - NDS32_GPR_ARG_FIRST_REGNUM
          + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
    }
}
1887
1888 static unsigned int
1889 nds32_function_arg_boundary (enum machine_mode mode, const_tree type)
1890 {
1891 return (nds32_needs_double_word_align (mode, type)
1892 ? NDS32_DOUBLE_WORD_ALIGNMENT
1893 : PARM_BOUNDARY);
1894 }
1895
1896 /* -- How Scalar Function Values Are Returned. */
1897
/* Return the rtx describing where a function result of RET_TYPE is
   returned: the first return register, in the (possibly promoted)
   mode of the type.  */
static rtx
nds32_function_value (const_tree ret_type,
                      const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                      bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp;

  mode = TYPE_MODE (ret_type);
  unsignedp = TYPE_UNSIGNED (ret_type);

  /* Small integer results are promoted; promote_mode may widen MODE
     and update UNSIGNEDP accordingly.  */
  mode = promote_mode (ret_type, mode, &unsignedp);

  return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
}
1913
/* Return the rtx for a library call result: the first return
   register, in MODE (no promotion, unlike nds32_function_value).  */
static rtx
nds32_libcall_value (enum machine_mode mode,
                     const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
}
1920
/* Return true if REGNO is a register in which function values
   are returned; only the first return register qualifies.  */
static bool
nds32_function_value_regno_p (const unsigned int regno)
{
  return (regno == NDS32_GPR_RET_FIRST_REGNUM);
}
1926
1927 /* -- Function Entry and Exit. */
1928
/* The content produced from this function
   will be placed before prologue body:
   assembly comments describing the frame, the live registers, and the
   function's attributes.  As a side effect, any isr-related attributes
   are recorded via nds32_construct_isr_vectors_information().  */
static void
nds32_asm_function_prologue (FILE *file,
                             HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  int r;
  const char *func_name;
  tree attrs;
  tree name;

  /* All stack frame information is supposed to be
     already computed when expanding prologue.
     The result is in cfun->machine.
     DO NOT call nds32_compute_stack_frame() here
     because it may corrupt the essential information.  */

  fprintf (file, "\t! BEGIN PROLOGUE\n");
  fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
  fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
  fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
  fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);

  /* Use df_regs_ever_live_p() to detect if the register
     is ever used in the current function.  */
  fprintf (file, "\t! registers ever_live: ");
  for (r = 0; r < 32; r++)
    {
      if (df_regs_ever_live_p (r))
        fprintf (file, "%s, ", reg_names[r]);
    }
  fputc ('\n', file);

  /* Display the attributes of this function.  */
  fprintf (file, "\t! function attributes: ");
  /* Get the attributes tree list.
     Note that GCC builds attributes list with reverse order.  */
  attrs = DECL_ATTRIBUTES (current_function_decl);

  /* If there is no attribute at all, print out "None".  */
  if (!attrs)
    fprintf (file, "None");

  /* If there are some attributes, try if we need to
     construct isr vector information.  */
  func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
  nds32_construct_isr_vectors_information (attrs, func_name);

  /* Display all attributes of this function.  */
  while (attrs)
    {
      name = TREE_PURPOSE (attrs);
      fprintf (file, "%s ", IDENTIFIER_POINTER (name));

      /* Pick up the next attribute.  */
      attrs = TREE_CHAIN (attrs);
    }
  fputc ('\n', file);
}
1988
/* Implement TARGET_ASM_FUNCTION_END_PROLOGUE.
   After rtl prologue has been expanded, this function is used.  */
static void
nds32_asm_function_end_prologue (FILE *file)
{
  fprintf (file, "\t! END PROLOGUE\n");

  /* If frame pointer is NOT needed and -mfp-as-gp is issued,
     we can generate special directive: ".omit_fp_begin"
     to guide linker doing fp-as-gp optimization.
     However, for a naked function, which means
     it should not have prologue/epilogue,
     using fp-as-gp still requires saving $fp by push/pop behavior and
     there is no benefit to use fp-as-gp on such small function.
     So we need to make sure this function is NOT naked as well.  */
  if (!frame_pointer_needed
      && !cfun->machine->naked_p
      && cfun->machine->fp_as_gp_p)
    {
      /* Emit the region-begin directive plus one instruction that puts
	 $fp near $gp; the linker deletes it if fp-as-gp is not applied.  */
      fprintf (file, "\t! ----------------------------------------\n");
      fprintf (file, "\t! Guide linker to do "
		     "link time optimization: fp-as-gp\n");
      fprintf (file, "\t! We add one more instruction to "
		     "initialize $fp near to $gp location.\n");
      fprintf (file, "\t! If linker fails to use fp-as-gp transformation,\n");
      fprintf (file, "\t! this extra instruction should be "
		     "eliminated at link stage.\n");
      fprintf (file, "\t.omit_fp_begin\n");
      fprintf (file, "\tla\t$fp,_FP_BASE_\n");
      fprintf (file, "\t! ----------------------------------------\n");
    }
}
2020
/* Implement TARGET_ASM_FUNCTION_BEGIN_EPILOGUE.
   Before rtl epilogue has been expanded, this function is used.  */
static void
nds32_asm_function_begin_epilogue (FILE *file)
{
  /* If frame pointer is NOT needed and -mfp-as-gp is issued,
     we can generate special directive: ".omit_fp_end"
     to claim fp-as-gp optimization range.
     However, for a naked function,
     which means it should not have prologue/epilogue,
     using fp-as-gp still requires saving $fp by push/pop behavior and
     there is no benefit to use fp-as-gp on such small function.
     So we need to make sure this function is NOT naked as well.
     This condition must mirror the one in
     nds32_asm_function_end_prologue so begin/end markers pair up.  */
  if (!frame_pointer_needed
      && !cfun->machine->naked_p
      && cfun->machine->fp_as_gp_p)
    {
      fprintf (file, "\t! ----------------------------------------\n");
      fprintf (file, "\t! Claim the range of fp-as-gp "
		     "link time optimization\n");
      fprintf (file, "\t.omit_fp_end\n");
      fprintf (file, "\t! ----------------------------------------\n");
    }

  fprintf (file, "\t! BEGIN EPILOGUE\n");
}
2046
2047 /* The content produced from this function
2048 will be placed after epilogue body. */
2049 static void
2050 nds32_asm_function_epilogue (FILE *file,
2051 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2052 {
2053 fprintf (file, "\t! END EPILOGUE\n");
2054 }
2055
2056 static void
2057 nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
2058 HOST_WIDE_INT delta,
2059 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2060 tree function)
2061 {
2062 int this_regno;
2063
2064 /* Make sure unwind info is emitted for the thunk if needed. */
2065 final_start_function (emit_barrier (), file, 1);
2066
2067 this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
2068 ? 1
2069 : 0);
2070
2071 if (delta != 0)
2072 {
2073 if (satisfies_constraint_Is15 (GEN_INT (delta)))
2074 {
2075 fprintf (file, "\taddi\t$r%d, $r%d, %ld\n",
2076 this_regno, this_regno, delta);
2077 }
2078 else if (satisfies_constraint_Is20 (GEN_INT (delta)))
2079 {
2080 fprintf (file, "\tmovi\t$ta, %ld\n", delta);
2081 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
2082 }
2083 else
2084 {
2085 fprintf (file, "\tsethi\t$ta, hi20(%ld)\n", delta);
2086 fprintf (file, "\tori\t$ta, $ta, lo12(%ld)\n", delta);
2087 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
2088 }
2089 }
2090
2091 fprintf (file, "\tb\t");
2092 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2093 fprintf (file, "\n");
2094
2095 final_end_function ();
2096 }
2097
2098 /* -- Permitting tail calls. */
2099
2100 /* Determine whether we need to enable warning for function return check. */
2101 static bool
2102 nds32_warn_func_return (tree decl)
2103 {
2104 /* Naked functions are implemented entirely in assembly, including the
2105 return sequence, so suppress warnings about this. */
2106 return !nds32_naked_function_p (decl);
2107 }
2108
2109 \f
2110 /* Implementing the Varargs Macros. */
2111
2112 static bool
2113 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
2114 {
2115 /* Return true so that all the named arguments for FUNCTION_ARG have named=1.
2116 If return false, for the variadic function, all named arguments EXCEPT
2117 the last are treated as named. */
2118 return true;
2119 }
2120
2121 \f
2122 /* Trampolines for Nested Functions. */
2123
/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE.
   Emit the fixed code part of a nested-function trampoline, followed by
   two zero-filled word slots that nds32_trampoline_init patches at run
   time (chain value at $pc + 16, target address at $pc + 20).  */
static void
nds32_asm_trampoline_template (FILE *f)
{
  if (TARGET_REDUCED_REGS)
    {
      /* Trampoline is not supported on reduced-set registers yet.
	 The template below uses $r15/$r16, which are not available
	 in the reduced register configuration.  */
      sorry ("a nested function is not supported for reduced registers");
    }
  else
    {
      asm_fprintf (f, "\t! Trampoline code template\n");
      asm_fprintf (f, "\t! This code fragment will be copied "
		      "into stack on demand\n");

      /* $r16 <- $pc, then load the two data slots pc-relatively.  */
      asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
      asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
		      "! load nested function address\n");
      asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
		      "! load chain_value\n");
      asm_fprintf (f, "\tjr\t$r15\n");
    }

  /* Preserve space ($pc + 16) for saving chain_value,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving chain_value\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);

  /* Preserve space ($pc + 20) for saving nested function address,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving nested function address\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
}
2156
/* Implement TARGET_TRAMPOLINE_INIT.
   Emit RTL insns to initialize the variable parts of a trampoline:
   copy the template into M_TRAMP on the stack, patch in CHAIN_VALUE and
   FNDECL's address, then synchronize the instruction cache over the
   trampoline's memory range so the freshly written code is executable.  */
static void
nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  int i;

  /* Nested function address.  */
  rtx fnaddr;
  /* The memory rtx that is going to
     be filled with chain_value.  */
  rtx chain_value_mem;
  /* The memory rtx that is going to
     be filled with nested function address.  */
  rtx nested_func_mem;

  /* Start address of trampoline code in stack, for doing cache sync.  */
  rtx sync_cache_addr;
  /* Temporary register for sync instruction.  */
  rtx tmp_reg;
  /* Instruction-cache sync instruction,
     requesting an argument as starting address.  */
  rtx isync_insn;
  /* For convenience reason of doing comparison.  */
  int tramp_align_in_bytes;

  /* Trampoline is not supported on reduced-set registers yet.  */
  if (TARGET_REDUCED_REGS)
    sorry ("a nested function is not supported for reduced registers");

  /* STEP 1: Copy trampoline code template into stack,
	     fill up essential data into stack.  */

  /* Extract nested function address rtx.  */
  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* m_tramp is memory rtx that is going to be filled with trampoline code.
     We have nds32_asm_trampoline_template() to emit template pattern.  */
  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* After copying trampoline code into stack,
     fill chain_value into stack at offset 16
     (the first reserved slot of the template).  */
  chain_value_mem = adjust_address (m_tramp, SImode, 16);
  emit_move_insn (chain_value_mem, chain_value);
  /* After copying trampoline code into stack,
     fill nested function address into stack at offset 20
     (the second reserved slot of the template).  */
  nested_func_mem = adjust_address (m_tramp, SImode, 20);
  emit_move_insn (nested_func_mem, fnaddr);

  /* STEP 2: Sync instruction-cache.  */

  /* We have successfully filled trampoline code into stack.
     However, in order to execute code in stack correctly,
     we must sync instruction cache.  */
  sync_cache_addr = XEXP (m_tramp, 0);
  tmp_reg         = gen_reg_rtx (SImode);
  isync_insn      = gen_unspec_volatile_isync (tmp_reg);

  /* Because nds32_cache_block_size is in bytes,
     we get trampoline alignment in bytes for convenient comparison.  */
  tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;

  if (tramp_align_in_bytes >= nds32_cache_block_size
      && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
    {
      /* Under this condition, the starting address of trampoline
	 must be aligned to the starting address of each cache block
	 and we do not have to worry about cross-boundary issue.
	 Issue one isync per cache block covering the trampoline.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}
    }
  else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
    {
      /* The starting address of trampoline code
	 may not be aligned to the cache block,
	 so the trampoline code may be across two cache block.
	 We need to sync the last element, which is 4-byte size,
	 of trampoline template.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}

      /* The last element of trampoline template is 4-byte size.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }
  else
    {
      /* This is the simplest case.
	 Because TRAMPOLINE_SIZE is less than or
	 equal to nds32_cache_block_size,
	 we can just sync start address and
	 the last element of trampoline code.  */

      /* Sync starting address of trampoline code.  */
      emit_move_insn (tmp_reg, sync_cache_addr);
      emit_insn (isync_insn);
      /* Sync the last element, which is 4-byte size,
	 of trampoline template.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }

  /* Set instruction serialization barrier
     to guarantee the correct operations.  */
  emit_insn (gen_unspec_volatile_isb ());
}
2283
2284 \f
2285 /* Addressing Modes. */
2286
/* Implement TARGET_LEGITIMATE_ADDRESS_P.
   Return true if X is a valid memory address for MODE.  STRICT selects
   strict register checking (after reload).  */
static bool
nds32_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* For (mem:DI addr) or (mem:DF addr) case,
     we only allow 'addr' to be [reg], [symbol_ref],
     [const], or [reg + const_int] pattern.  */
  if (mode == DImode || mode == DFmode)
    {
      /* Allow [Reg + const_int] addressing mode.
	 Both operand orders of PLUS are accepted.  */
      if (GET_CODE (x) == PLUS)
	{
	  if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	      && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
	      && CONST_INT_P (XEXP (x, 1)))
	    return true;

	  else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
		   && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
		   && CONST_INT_P (XEXP (x, 0)))
	    return true;
	}

      /* Now check [reg], [symbol_ref], and [const].
	 Any other form is rejected for double-word modes.  */
      if (GET_CODE (x) != REG
	  && GET_CODE (x) != SYMBOL_REF
	  && GET_CODE (x) != CONST)
	return false;
    }

  /* Check if 'x' is a valid address.  */
  switch (GET_CODE (x))
    {
    case REG:
      /* (mem (reg A)) => [Ra] */
      return nds32_address_register_rtx_p (x, strict);

    case SYMBOL_REF:

      /* Without gp-direct access, a bare symbol cannot be an address
	 once register allocation has started.  */
      if (!TARGET_GP_DIRECT
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;

      /* (mem (symbol_ref A)) => [symbol_ref] */
      return !currently_expanding_to_rtl;

    case CONST:

      /* Same gp-direct restriction as the SYMBOL_REF case above.  */
      if (!TARGET_GP_DIRECT
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;

      /* (mem (const (...)))
	 => [ + const_addr ], where const_addr = symbol_ref + const_int */
      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 0);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
	    return true;
	  else
	    return false;
	}

	return false;

    case POST_MODIFY:
      /* (mem (post_modify (reg) (plus (reg) (reg))))
	 => [Ra], Rb */
      /* (mem (post_modify (reg) (plus (reg) (const_int))))
	 => [Ra], const_int */
      if (GET_CODE (XEXP (x, 0)) == REG
	  && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 1);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (nds32_address_register_rtx_p (op0, strict)
	      && nds32_legitimate_index_p (mode, op1, strict))
	    return true;
	  else
	    return false;
	}

	return false;

    case POST_INC:
    case POST_DEC:
      /* (mem (post_inc reg)) => [Ra], 1/2/4 */
      /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
      /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
	 We only need to deal with register Ra.  */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
	return true;
      else
	return false;

    case PLUS:
      /* (mem (plus reg const_int))
	 => [Ra + imm] */
      /* (mem (plus reg reg))
	 => [Ra + Rb] */
      /* (mem (plus (mult reg const_int) reg))
	 => [Ra + Rb << sv] */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	  && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
	return true;
      else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
	       && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
	return true;
      else
	return false;

    case LO_SUM:
      if (!TARGET_GP_DIRECT)
	return true;
      /* NOTE(review): with TARGET_GP_DIRECT this case falls through to
	 'default' and returns false — looks intentional, but there is
	 no explicit fallthrough marker; confirm.  */

    default:
      return false;
    }
}
2416
2417 \f
2418 /* Describing Relative Costs of Operations. */
2419
2420 static int nds32_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2421 reg_class_t from,
2422 reg_class_t to)
2423 {
2424 if (from == HIGH_REGS || to == HIGH_REGS)
2425 return 6;
2426
2427 return 2;
2428 }
2429
2430 static int nds32_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2431 reg_class_t rclass ATTRIBUTE_UNUSED,
2432 bool in ATTRIBUTE_UNUSED)
2433 {
2434 return 8;
2435 }
2436
2437 /* This target hook describes the relative costs of RTL expressions.
2438 Return 'true' when all subexpressions of x have been processed.
2439 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
2440 Refer to gcc/rtlanal.c for more information. */
2441 static bool
2442 nds32_rtx_costs (rtx x,
2443 int code,
2444 int outer_code,
2445 int opno ATTRIBUTE_UNUSED,
2446 int *total,
2447 bool speed)
2448 {
2449 /* According to 'speed', goto suitable cost model section. */
2450 if (speed)
2451 goto performance_cost;
2452 else
2453 goto size_cost;
2454
2455
2456 performance_cost:
2457 /* This is section for performance cost model. */
2458
2459 /* In gcc/rtl.h, the default value of COSTS_N_INSNS(N) is N*4.
2460 We treat it as 4-cycle cost for each instruction
2461 under performance consideration. */
2462 switch (code)
2463 {
2464 case SET:
2465 /* For 'SET' rtx, we need to return false
2466 so that it can recursively calculate costs. */
2467 return false;
2468
2469 case USE:
2470 /* Used in combine.c as a marker. */
2471 *total = 0;
2472 break;
2473
2474 case MULT:
2475 *total = COSTS_N_INSNS (1);
2476 break;
2477
2478 case DIV:
2479 case UDIV:
2480 case MOD:
2481 case UMOD:
2482 *total = COSTS_N_INSNS (7);
2483 break;
2484
2485 default:
2486 *total = COSTS_N_INSNS (1);
2487 break;
2488 }
2489
2490 return true;
2491
2492
2493 size_cost:
2494 /* This is section for size cost model. */
2495
2496 /* In gcc/rtl.h, the default value of COSTS_N_INSNS(N) is N*4.
2497 We treat it as 4-byte cost for each instruction
2498 under code size consideration. */
2499 switch (code)
2500 {
2501 case SET:
2502 /* For 'SET' rtx, we need to return false
2503 so that it can recursively calculate costs. */
2504 return false;
2505
2506 case USE:
2507 /* Used in combine.c as a marker. */
2508 *total = 0;
2509 break;
2510
2511 case CONST_INT:
2512 /* All instructions involving constant operation
2513 need to be considered for cost evaluation. */
2514 if (outer_code == SET)
2515 {
2516 /* (set X imm5s), use movi55, 2-byte cost.
2517 (set X imm20s), use movi, 4-byte cost.
2518 (set X BIG_INT), use sethi/ori, 8-byte cost. */
2519 if (satisfies_constraint_Is05 (x))
2520 *total = COSTS_N_INSNS (1) - 2;
2521 else if (satisfies_constraint_Is20 (x))
2522 *total = COSTS_N_INSNS (1);
2523 else
2524 *total = COSTS_N_INSNS (2);
2525 }
2526 else if (outer_code == PLUS || outer_code == MINUS)
2527 {
2528 /* Possible addi333/subi333 or subi45/addi45, 2-byte cost.
2529 General case, cost 1 instruction with 4-byte. */
2530 if (satisfies_constraint_Iu05 (x))
2531 *total = COSTS_N_INSNS (1) - 2;
2532 else
2533 *total = COSTS_N_INSNS (1);
2534 }
2535 else if (outer_code == ASHIFT)
2536 {
2537 /* Possible slli333, 2-byte cost.
2538 General case, cost 1 instruction with 4-byte. */
2539 if (satisfies_constraint_Iu03 (x))
2540 *total = COSTS_N_INSNS (1) - 2;
2541 else
2542 *total = COSTS_N_INSNS (1);
2543 }
2544 else if (outer_code == ASHIFTRT || outer_code == LSHIFTRT)
2545 {
2546 /* Possible srai45 or srli45, 2-byte cost.
2547 General case, cost 1 instruction with 4-byte. */
2548 if (satisfies_constraint_Iu05 (x))
2549 *total = COSTS_N_INSNS (1) - 2;
2550 else
2551 *total = COSTS_N_INSNS (1);
2552 }
2553 else
2554 {
2555 /* For other cases, simply set it 4-byte cost. */
2556 *total = COSTS_N_INSNS (1);
2557 }
2558 break;
2559
2560 case CONST_DOUBLE:
2561 /* It requires high part and low part processing, set it 8-byte cost. */
2562 *total = COSTS_N_INSNS (2);
2563 break;
2564
2565 default:
2566 /* For other cases, generally we set it 4-byte cost
2567 and stop resurively traversing. */
2568 *total = COSTS_N_INSNS (1);
2569 break;
2570 }
2571
2572 return true;
2573 }
2574
2575 static int nds32_address_cost (rtx address,
2576 enum machine_mode mode ATTRIBUTE_UNUSED,
2577 addr_space_t as ATTRIBUTE_UNUSED,
2578 bool speed)
2579 {
2580 rtx plus0, plus1;
2581 enum rtx_code code;
2582
2583 code = GET_CODE (address);
2584
2585 /* According to 'speed', goto suitable cost model section. */
2586 if (speed)
2587 goto performance_cost;
2588 else
2589 goto size_cost;
2590
2591 performance_cost:
2592 /* This is section for performance cost model. */
2593
2594 /* FALLTHRU, currently we use same cost model as size_cost. */
2595
2596 size_cost:
2597 /* This is section for size cost model. */
2598
2599 switch (code)
2600 {
2601 case POST_MODIFY:
2602 case POST_INC:
2603 case POST_DEC:
2604 /* We encourage that rtx contains
2605 POST_MODIFY/POST_INC/POST_DEC behavior. */
2606 return 0;
2607
2608 case SYMBOL_REF:
2609 /* We can have gp-relative load/store for symbol_ref.
2610 Have it 4-byte cost. */
2611 return COSTS_N_INSNS (1);
2612
2613 case CONST:
2614 /* It is supposed to be the pattern (const (plus symbol_ref const_int)).
2615 Have it 4-byte cost. */
2616 return COSTS_N_INSNS (1);
2617
2618 case REG:
2619 /* Simply return 4-byte costs. */
2620 return COSTS_N_INSNS (1);
2621
2622 case PLUS:
2623 /* We do not need to check if the address is a legitimate address,
2624 because this hook is never called with an invalid address.
2625 But we better check the range of
2626 const_int value for cost, if it exists. */
2627 plus0 = XEXP (address, 0);
2628 plus1 = XEXP (address, 1);
2629
2630 if (REG_P (plus0) && CONST_INT_P (plus1))
2631 {
2632 /* If it is possible to be lwi333/swi333 form,
2633 make it 2-byte cost. */
2634 if (satisfies_constraint_Iu05 (plus1))
2635 return (COSTS_N_INSNS (1) - 2);
2636 else
2637 return COSTS_N_INSNS (1);
2638 }
2639
2640 /* For other 'plus' situation, make it cost 4-byte. */
2641 return COSTS_N_INSNS (1);
2642
2643 default:
2644 break;
2645 }
2646
2647 return COSTS_N_INSNS (4);
2648 }
2649
2650 \f
2651 /* Defining the Output Assembler Language. */
2652
2653 /* -- The Overall Framework of an Assembler File. */
2654
/* Implement TARGET_ASM_FILE_START.
   Emit file-wide directives (ABI tag, vector size, link-time
   optimization hints) plus a human-readable configuration banner,
   then reset the isr vector bookkeeping for this translation unit.  */
static void
nds32_asm_file_start (void)
{
  int i;

  default_file_start ();

  /* Tell assembler which ABI we are using.  */
  fprintf (asm_out_file, "\t! ABI version\n");
  fprintf (asm_out_file, "\t.abi_2\n");

  /* Tell assembler that this asm code is generated by compiler.  */
  fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
  fprintf (asm_out_file, "\t.flag\tverbatim\n");
  /* Give assembler the size of each vector for interrupt handler.  */
  fprintf (asm_out_file, "\t! This vector size directive is required "
			 "for checking inconsistency on interrupt handler\n");
  fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);

  /* If user enables '-mforce-fp-as-gp' or compiles programs with -Os,
     the compiler may produce 'la $fp,_FP_BASE_' instruction
     at prologue for fp-as-gp optimization.
     We should emit weak reference of _FP_BASE_ to avoid undefined reference
     in case user does not pass '--relax' option to linker.  */
  if (TARGET_FORCE_FP_AS_GP || optimize_size)
    {
      fprintf (asm_out_file, "\t! This weak reference is required to do "
			     "fp-as-gp link time optimization\n");
      fprintf (asm_out_file, "\t.weak\t_FP_BASE_\n");
    }
  /* If user enables '-mex9', we should emit relaxation directive
     to tell linker that this file is allowed to do ex9 optimization.  */
  if (TARGET_EX9)
    {
      fprintf (asm_out_file, "\t! This relaxation directive is required "
			     "to do ex9 link time optimization\n");
      fprintf (asm_out_file, "\t.relax\tex9\n");
    }

  /* The remainder is an informational banner describing the target
     configuration used for this compilation.  */
  fprintf (asm_out_file, "\t! ------------------------------------\n");

  if (TARGET_ISA_V2)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
  if (TARGET_ISA_V3)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
  if (TARGET_ISA_V3M)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");

  fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
	   ((TARGET_BIG_ENDIAN) ? "big-endian"
				: "little-endian"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
	   ((TARGET_CMOV) ? "Yes"
			  : "No"));
  fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
	   ((TARGET_PERF_EXT) ? "Yes"
			      : "No"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
	   ((TARGET_V3PUSH) ? "Yes"
			    : "No"));
  fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
	   ((TARGET_16_BIT) ? "Yes"
			    : "No"));
  fprintf (asm_out_file, "\t! GP base access\t: %s\n",
	   ((TARGET_GP_DIRECT) ? "Yes"
			       : "No"));
  fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
	   ((TARGET_REDUCED_REGS) ? "Yes"
				  : "No"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  if (optimize_size)
    fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
  else
    fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
	   nds32_cache_block_size);

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  /* Initialize isr vector information array before compiling functions.
     Every vector starts out empty; nds32_construct_isr_vectors_information
     fills entries in as functions with isr attributes are seen.  */
  for (i = 0; i < NDS32_N_ISR_VECTORS; i++)
    {
      nds32_isr_vectors[i].category = NDS32_ISR_NONE;
      strcpy (nds32_isr_vectors[i].func_name, "");
      nds32_isr_vectors[i].save_reg = NDS32_PARTIAL_SAVE;
      nds32_isr_vectors[i].nested_type = NDS32_NOT_NESTED;
      nds32_isr_vectors[i].total_n_vectors = 0;
      strcpy (nds32_isr_vectors[i].nmi_name, "");
      strcpy (nds32_isr_vectors[i].warm_name, "");
    }
}
2757
/* Implement TARGET_ASM_FILE_END.
   If any isr vectors were registered during compilation, emit their
   jump-table and vector-section contents at the end of the file.  */
static void
nds32_asm_file_end (void)
{
  int i;

  /* If all the vectors are NDS32_ISR_NONE, we can return immediately.  */
  for (i = 0; i < NDS32_N_ISR_VECTORS; i++)
    if (nds32_isr_vectors[i].category != NDS32_ISR_NONE)
      break;

  if (i == NDS32_N_ISR_VECTORS)
    return;

  /* At least one vector is NOT NDS32_ISR_NONE,
     we should output isr vector information.  */
  fprintf (asm_out_file, "\t! ------------------------------------\n");
  fprintf (asm_out_file, "\t! The isr vector information:\n");
  fprintf (asm_out_file, "\t! ------------------------------------\n");

  /* Check reset handler first.  Its vector number is always 0.  */
  if (nds32_isr_vectors[0].category == NDS32_ISR_RESET)
    {
      nds32_emit_isr_reset_content ();
      fprintf (asm_out_file, "\t! ------------------------------------\n");
    }

  /* Check other vectors, starting from vector number 1.  */
  for (i = 1; i < NDS32_N_ISR_VECTORS; i++)
    {
      if (nds32_isr_vectors[i].category == NDS32_ISR_INTERRUPT
	  || nds32_isr_vectors[i].category == NDS32_ISR_EXCEPTION)
	{
	  /* Found one vector which is interrupt or exception.
	     Output its jmptbl and vector section content.  */
	  fprintf (asm_out_file, "\t! interrupt/exception vector %02d\n", i);
	  fprintf (asm_out_file, "\t! ------------------------------------\n");
	  nds32_emit_isr_jmptbl_section (i);
	  fprintf (asm_out_file, "\t! ....................................\n");
	  nds32_emit_isr_vector_section (i);
	  fprintf (asm_out_file, "\t! ------------------------------------\n");
	}
    }

  fprintf (asm_out_file, "\t! ------------------------------------\n");
}
2803
2804 /* -- Output and Generation of Labels. */
2805
/* Implement TARGET_ASM_GLOBALIZE_LABEL.
   Emit a ".global NAME" directive to make NAME externally visible.  */
static void
nds32_asm_globalize_label (FILE *stream, const char *name)
{
  fprintf (stream, "\t.global\t");
  assemble_name (stream, name);
  fprintf (stream, "\n");
}
2813
2814 /* -- Output of Assembler Instructions. */
2815
/* Implement TARGET_PRINT_OPERAND.
   Output operand X to STREAM.  CODE is the optional punctuation/letter
   modifier from the template; only 'V' (intrinsic system/user register
   index) is handled specially.  */
static void
nds32_print_operand (FILE *stream, rtx x, int code)
{
  int op_value;

  switch (code)
    {
    case 0 :
      /* Do nothing special.  */
      break;

    case 'V':
      /* 'x' is supposed to be CONST_INT, get the value.  */
      gcc_assert (CONST_INT_P (x));
      op_value = INTVAL (x);

      /* According to the Andes architecture,
	 the system/user register index range is 0 ~ 1023.
	 In order to avoid conflict between user-specified-integer value
	 and enum-specified-register value,
	 the 'enum nds32_intrinsic_registers' value
	 in nds32_intrinsic.h starts from 1024.  */
      if (op_value < 1024 && op_value >= 0)
	{
	  /* If user gives integer value directly (0~1023),
	     we just print out the value.  */
	  fprintf (stream, "%d", op_value);
	}
      else if (op_value < 0
	       || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
			       + 1024))
	{
	  /* The enum index value for array size is out of range.  */
	  error ("intrinsic register index is out of range");
	}
      else
	{
	  /* If user applies normal way with __NDS32_REG_XXX__ enum data,
	     we can print out register name.  Remember to subtract 1024.  */
	  fprintf (stream, "%s",
		   nds32_intrinsic_register_names[op_value - 1024]);
	}

      /* No need to handle following process, so return immediately.  */
      return;

    default :
      /* Unknown flag.  */
      output_operand_lossage ("invalid operand output code");
      break;
    }

  /* Ordinary operand: dispatch on the rtx code of X itself.  */
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case SYMBOL_REF:
      output_addr_const (stream, x);
      break;

    case REG:
      /* Forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REGNO (x) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      /* Normal cases, print out register name.  */
      fputs (reg_names[REGNO (x)], stream);
      break;

    case MEM:
      output_address (XEXP (x, 0));
      break;

    case CODE_LABEL:
    case CONST_INT:
    case CONST:
      output_addr_const (stream, x);
      break;

    default:
      /* Generally, output_addr_const () is able to handle most cases.
	 We want to see what CODE could appear,
	 so we use gcc_unreachable() to stop it.  */
      debug_rtx (x);
      gcc_unreachable ();
      break;
    }
}
2905
/* Output to STREAM the assembly syntax for memory address rtx X.
   This handles every address form the nds32 addressing modes generate:
   symbolic, register-indirect, base+offset, base+index, scaled-index,
   post-modify, and post-increment/decrement.  Any other rtx code
   indicates a backend bug, so the rtx is dumped and we abort.  */
static void
nds32_print_operand_address (FILE *stream, rtx x)
{
  rtx op0, op1;

  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case CONST:
      /* [ + symbol_ref] */
      /* [ + const_addr], where const_addr = symbol_ref + const_int */
      fputs ("[ + ", stream);
      output_addr_const (stream, x);
      fputs ("]", stream);
      break;

    case REG:
      /* Forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REGNO (x) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");

      /* [Ra] */
      fprintf (stream, "[%s]", reg_names[REGNO (x)]);
      break;

    case PLUS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);

      /* Checking op0, forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REG_P (op0)
          && REGNO (op0) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REG_P (op1)
          && REGNO (op1) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && CONST_INT_P (op1))
        {
          /* [Ra + imm] */
          fprintf (stream, "[%s + (%d)]",
                   reg_names[REGNO (op0)], (int)INTVAL (op1));
        }
      else if (REG_P (op0) && REG_P (op1))
        {
          /* [Ra + Rb] */
          fprintf (stream, "[%s + %s]",
                   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
        }
      else if (GET_CODE (op0) == MULT && REG_P (op1))
        {
          /* [Ra + Rb << sv]
             From observation, the pattern looks like:
             (plus:SI (mult:SI (reg:SI 58)
                               (const_int 4 [0x4]))
                      (reg/f:SI 57)) */
          int sv;

          /* We need to set sv to output shift value.
             The mult scale factor 1/2/4 maps to shift amount 0/1/2;
             any other factor cannot come from a legitimate address,
             hence the gcc_unreachable () below.  */
          if (INTVAL (XEXP (op0, 1)) == 1)
            sv = 0;
          else if (INTVAL (XEXP (op0, 1)) == 2)
            sv = 1;
          else if (INTVAL (XEXP (op0, 1)) == 4)
            sv = 2;
          else
            gcc_unreachable ();

          fprintf (stream, "[%s + %s << %d]",
                   reg_names[REGNO (op1)],
                   reg_names[REGNO (XEXP (op0, 0))],
                   sv);
        }
      else
        {
          /* The control flow is not supposed to be here.  */
          debug_rtx (x);
          gcc_unreachable ();
        }

      break;

    case POST_MODIFY:
      /* (post_modify (regA) (plus (regA) (regB)))
         (post_modify (regA) (plus (regA) (const_int)))
         We would like to extract
         regA and regB (or const_int) from plus rtx.  */
      op0 = XEXP (XEXP (x, 1), 0);
      op1 = XEXP (XEXP (x, 1), 1);

      /* Checking op0, forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REG_P (op0)
          && REGNO (op0) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REG_P (op1)
          && REGNO (op1) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && REG_P (op1))
        {
          /* [Ra], Rb */
          fprintf (stream, "[%s], %s",
                   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
        }
      else if (REG_P (op0) && CONST_INT_P (op1))
        {
          /* [Ra], imm */
          fprintf (stream, "[%s], %d",
                   reg_names[REGNO (op0)], (int)INTVAL (op1));
        }
      else
        {
          /* The control flow is not supposed to be here.  */
          debug_rtx (x);
          gcc_unreachable ();
        }

      break;

    case POST_INC:
    case POST_DEC:
      op0 = XEXP (x, 0);

      /* Checking op0, forbid using static chain register ($r16)
         on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
          && REG_P (op0)
          && REGNO (op0) == STATIC_CHAIN_REGNUM)
        sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0))
        {
          /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
             The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
             We only need to deal with register Ra.  */
          fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
        }
      else
        {
          /* The control flow is not supposed to be here.  */
          debug_rtx (x);
          gcc_unreachable ();
        }

      break;

    default :
      /* Generally, output_addr_const () is able to handle most cases.
         We want to see what CODE could appear,
         so we use gcc_unreachable() to stop it.  */
      debug_rtx (x);
      gcc_unreachable ();
      break;
    }
}
3073
3074 \f
3075 /* Defining target-specific uses of __attribute__. */
3076
3077 /* Add some checking after merging attributes. */
3078 static tree
3079 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
3080 {
3081 tree combined_attrs;
3082
3083 /* Create combined attributes. */
3084 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
3085 DECL_ATTRIBUTES (newdecl));
3086
3087 /* Since newdecl is acutally a duplicate of olddecl,
3088 we can take olddecl for some operations. */
3089 if (TREE_CODE (olddecl) == FUNCTION_DECL)
3090 {
3091 /* Check isr-specific attributes conflict. */
3092 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
3093 }
3094
3095 return combined_attrs;
3096 }
3097
/* Implement TARGET_INSERT_ATTRIBUTES:  validate the isr-specific
   attributes in *ATTRIBUTES before they are attached to DECL.
   Only validation (via error ()) happens here; the actual isr vector
   information is constructed later.  */
static void
nds32_insert_attributes (tree decl, tree *attributes)
{
  /* For function declaration, we need to check isr-specific attributes:
     1. Call nds32_check_isr_attrs_conflict() to check any conflict.
     2. Check valid integer value for interrupt/exception.
     3. Check valid integer value for reset.
     4. Check valid function for nmi/warm.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree func_attrs;
      tree intr, excp, reset;

      /* Pick up function attributes.  */
      func_attrs = *attributes;

      /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict.  */
      nds32_check_isr_attrs_conflict (decl, func_attrs);

      /* Now we are starting to check valid id value
         for interrupt/exception/reset.
         Note that we ONLY check its validity here.
         To construct isr vector information, it is still performed
         by nds32_construct_isr_vectors_information().  */
      intr = lookup_attribute ("interrupt", func_attrs);
      excp = lookup_attribute ("exception", func_attrs);
      reset = lookup_attribute ("reset", func_attrs);

      if (intr || excp)
        {
          /* Deal with interrupt/exception.  */
          tree id_list;
          unsigned int lower_bound, upper_bound;

          /* The way to handle interrupt or exception is the same,
             we just need to take care of actual vector number.
             For interrupt(0..63), the actual vector number is (9..72).
             For exception(1..8), the actual vector number is (1..8).  */
          lower_bound = (intr) ? (0) : (1);
          upper_bound = (intr) ? (63) : (8);

          /* Prepare id list so that we can traverse id value.
             Both attributes accept a list of ids, so walk the chain.  */
          id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));

          /* 2. Check valid integer value for interrupt/exception.  */
          while (id_list)
            {
              tree id;

              /* Pick up each vector id value.  */
              id = TREE_VALUE (id_list);
              /* Issue error if it is not a valid integer value.
                 The wi:: comparisons are unsigned, so a negative
                 constant is also rejected as out of range.  */
              if (TREE_CODE (id) != INTEGER_CST
                  || wi::ltu_p (id, lower_bound)
                  || wi::gtu_p (id, upper_bound))
                error ("invalid id value for interrupt/exception attribute");

              /* Advance to next id.  */
              id_list = TREE_CHAIN (id_list);
            }
        }
      else if (reset)
        {
          /* Deal with reset.  */
          tree id_list;
          tree id;
          tree nmi, warm;
          unsigned int lower_bound;
          unsigned int upper_bound;

          /* Prepare id_list and identify id value so that
             we can check if total number of vectors is valid.
             Only the first id in the list is checked here.  */
          id_list = TREE_VALUE (reset);
          id = TREE_VALUE (id_list);

          /* The maximum numbers for user's interrupt is 64.  */
          lower_bound = 0;
          upper_bound = 64;

          /* 3. Check valid integer value for reset.  */
          if (TREE_CODE (id) != INTEGER_CST
              || wi::ltu_p (id, lower_bound)
              || wi::gtu_p (id, upper_bound))
            error ("invalid id value for reset attribute");

          /* 4. Check valid function for nmi/warm.
             These attributes name handler functions, so their values
             must be identifiers rather than constants.  */
          nmi = lookup_attribute ("nmi", func_attrs);
          warm = lookup_attribute ("warm", func_attrs);

          if (nmi != NULL_TREE)
            {
              tree nmi_func_list;
              tree nmi_func;

              nmi_func_list = TREE_VALUE (nmi);
              nmi_func = TREE_VALUE (nmi_func_list);

              /* Issue error if it is not a valid nmi function.  */
              if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
                error ("invalid nmi function for reset attribute");
            }

          if (warm != NULL_TREE)
            {
              tree warm_func_list;
              tree warm_func;

              warm_func_list = TREE_VALUE (warm);
              warm_func = TREE_VALUE (warm_func_list);

              /* Issue error if it is not a valid warm function.  */
              if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
                error ("invalid warm function for reset attribute");
            }
        }
      else
        {
          /* No interrupt, exception, or reset attribute is set.  */
          return;
        }
    }
}
3221
3222 static bool
3223 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
3224 tree pop_target ATTRIBUTE_UNUSED)
3225 {
3226 /* Currently, we do not parse any pragma target by ourself,
3227 so just simply return false. */
3228 return false;
3229 }
3230
3231 static void
3232 nds32_option_override (void)
3233 {
3234 /* After all the command options have been parsed,
3235 we shall deal with some flags for changing compiler settings. */
3236
3237 /* At first, we check if we have to strictly
3238 set some flags based on ISA family. */
3239 if (TARGET_ISA_V2)
3240 {
3241 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
3242 target_flags &= ~MASK_V3PUSH;
3243 }
3244 if (TARGET_ISA_V3)
3245 {
3246 /* Under V3 ISA, currently nothing should be strictly set. */
3247 }
3248 if (TARGET_ISA_V3M)
3249 {
3250 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
3251 target_flags |= MASK_REDUCED_REGS;
3252 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
3253 target_flags &= ~MASK_PERF_EXT;
3254 }
3255
3256 /* See if we are using reduced-set registers:
3257 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
3258 If so, we must forbid using $r11~$r14, $r16~$r27. */
3259 if (TARGET_REDUCED_REGS)
3260 {
3261 int r;
3262
3263 /* Prevent register allocator from
3264 choosing it as doing register allocation. */
3265 for (r = 11; r <= 14; r++)
3266 fixed_regs[r] = call_used_regs[r] = 1;
3267 for (r = 16; r <= 27; r++)
3268 fixed_regs[r] = call_used_regs[r] = 1;
3269 }
3270
3271 /* See if user explicitly would like to use fp-as-gp optimization.
3272 If so, we must prevent $fp from being allocated
3273 during register allocation. */
3274 if (TARGET_FORCE_FP_AS_GP)
3275 fixed_regs[FP_REGNUM] = call_used_regs[FP_REGNUM] = 1;
3276
3277 if (!TARGET_16_BIT)
3278 {
3279 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
3280 target_flags &= ~MASK_V3PUSH;
3281 }
3282
3283 /* Currently, we don't support PIC code generation yet. */
3284 if (flag_pic)
3285 sorry ("not support -fpic");
3286 }
3287
3288 \f
3289 /* Miscellaneous Parameters. */
3290
3291 static void
3292 nds32_init_builtins (void)
3293 {
3294 tree pointer_type_node = build_pointer_type (integer_type_node);
3295
3296 tree void_ftype_void = build_function_type (void_type_node,
3297 void_list_node);
3298
3299 tree void_ftype_pint = build_function_type_list (void_type_node,
3300 pointer_type_node,
3301 NULL_TREE);
3302
3303 tree int_ftype_int = build_function_type_list (integer_type_node,
3304 integer_type_node,
3305 NULL_TREE);
3306
3307 tree void_ftype_int_int = build_function_type_list (void_type_node,
3308 integer_type_node,
3309 integer_type_node,
3310 NULL_TREE);
3311
3312 /* Cache. */
3313 add_builtin_function ("__builtin_nds32_isync", void_ftype_pint,
3314 NDS32_BUILTIN_ISYNC,
3315 BUILT_IN_MD, NULL, NULL_TREE);
3316 add_builtin_function ("__builtin_nds32_isb", void_ftype_void,
3317 NDS32_BUILTIN_ISB,
3318 BUILT_IN_MD, NULL, NULL_TREE);
3319
3320 /* Register Transfer. */
3321 add_builtin_function ("__builtin_nds32_mfsr", int_ftype_int,
3322 NDS32_BUILTIN_MFSR,
3323 BUILT_IN_MD, NULL, NULL_TREE);
3324 add_builtin_function ("__builtin_nds32_mfusr", int_ftype_int,
3325 NDS32_BUILTIN_MFUSR,
3326 BUILT_IN_MD, NULL, NULL_TREE);
3327 add_builtin_function ("__builtin_nds32_mtsr", void_ftype_int_int,
3328 NDS32_BUILTIN_MTSR,
3329 BUILT_IN_MD, NULL, NULL_TREE);
3330 add_builtin_function ("__builtin_nds32_mtusr", void_ftype_int_int,
3331 NDS32_BUILTIN_MTUSR,
3332 BUILT_IN_MD, NULL, NULL_TREE);
3333
3334 /* Interrupt. */
3335 add_builtin_function ("__builtin_nds32_setgie_en", void_ftype_void,
3336 NDS32_BUILTIN_SETGIE_EN,
3337 BUILT_IN_MD, NULL, NULL_TREE);
3338 add_builtin_function ("__builtin_nds32_setgie_dis", void_ftype_void,
3339 NDS32_BUILTIN_SETGIE_DIS,
3340 BUILT_IN_MD, NULL, NULL_TREE);
3341 }
3342
3343 static rtx
3344 nds32_expand_builtin (tree exp,
3345 rtx target,
3346 rtx subtarget ATTRIBUTE_UNUSED,
3347 enum machine_mode mode ATTRIBUTE_UNUSED,
3348 int ignore ATTRIBUTE_UNUSED)
3349 {
3350 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
3351
3352 int fcode = DECL_FUNCTION_CODE (fndecl);
3353
3354 switch (fcode)
3355 {
3356 /* Cache. */
3357 case NDS32_BUILTIN_ISYNC:
3358 return nds32_expand_builtin_null_ftype_reg
3359 (CODE_FOR_unspec_volatile_isync, exp, target);
3360 case NDS32_BUILTIN_ISB:
3361 /* Since there are no result and operands for isb instruciton,
3362 we can simply emit this rtx. */
3363 emit_insn (gen_unspec_volatile_isb ());
3364 return target;
3365
3366 /* Register Transfer. */
3367 case NDS32_BUILTIN_MFSR:
3368 return nds32_expand_builtin_reg_ftype_imm
3369 (CODE_FOR_unspec_volatile_mfsr, exp, target);
3370 case NDS32_BUILTIN_MFUSR:
3371 return nds32_expand_builtin_reg_ftype_imm
3372 (CODE_FOR_unspec_volatile_mfusr, exp, target);
3373 case NDS32_BUILTIN_MTSR:
3374 return nds32_expand_builtin_null_ftype_reg_imm
3375 (CODE_FOR_unspec_volatile_mtsr, exp, target);
3376 case NDS32_BUILTIN_MTUSR:
3377 return nds32_expand_builtin_null_ftype_reg_imm
3378 (CODE_FOR_unspec_volatile_mtusr, exp, target);
3379
3380 /* Interrupt. */
3381 case NDS32_BUILTIN_SETGIE_EN:
3382 /* Since there are no result and operands for setgie.e instruciton,
3383 we can simply emit this rtx. */
3384 emit_insn (gen_unspec_volatile_setgie_en ());
3385 return target;
3386 case NDS32_BUILTIN_SETGIE_DIS:
3387 /* Since there are no result and operands for setgie.d instruciton,
3388 we can simply emit this rtx. */
3389 emit_insn (gen_unspec_volatile_setgie_dis ());
3390 return target;
3391
3392 default:
3393 gcc_unreachable ();
3394 }
3395
3396 return NULL_RTX;
3397 }
3398
3399
3400 /* ------------------------------------------------------------------------ */
3401
3402 /* PART 4: Implemet extern function definitions,
3403 the prototype is in nds32-protos.h. */
3404 \f
3405 /* Defining Data Structures for Per-function Information. */
3406
3407 void
3408 nds32_init_expanders (void)
3409 {
3410 /* Arrange to initialize and mark the machine per-function status. */
3411 init_machine_status = nds32_init_machine_status;
3412 }
3413
3414 \f
3415 /* Register Usage. */
3416
3417 /* -- How Values Fit in Registers. */
3418
3419 int
3420 nds32_hard_regno_nregs (int regno ATTRIBUTE_UNUSED,
3421 enum machine_mode mode)
3422 {
3423 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
3424 }
3425
3426 int
3427 nds32_hard_regno_mode_ok (int regno, enum machine_mode mode)
3428 {
3429 /* Restrict double-word quantities to even register pairs. */
3430 if (HARD_REGNO_NREGS (regno, mode) == 1
3431 || !((regno) & 1))
3432 return 1;
3433
3434 return 0;
3435 }
3436
3437 \f
3438 /* Register Classes. */
3439
3440 enum reg_class
3441 nds32_regno_reg_class (int regno)
3442 {
3443 /* Refer to nds32.h for more register class details. */
3444
3445 if (regno >= 0 && regno <= 7)
3446 return LOW_REGS;
3447 else if (regno >= 8 && regno <= 11)
3448 return MIDDLE_REGS;
3449 else if (regno >= 12 && regno <= 14)
3450 return HIGH_REGS;
3451 else if (regno == 15)
3452 return R15_TA_REG;
3453 else if (regno >= 16 && regno <= 19)
3454 return MIDDLE_REGS;
3455 else if (regno >= 20 && regno <= 31)
3456 return HIGH_REGS;
3457 else if (regno == 32 || regno == 33)
3458 return FRAME_REGS;
3459 else
3460 return NO_REGS;
3461 }
3462
3463 \f
3464 /* Stack Layout and Calling Conventions. */
3465
3466 /* -- Basic Stack Layout. */
3467
3468 rtx
3469 nds32_return_addr_rtx (int count,
3470 rtx frameaddr ATTRIBUTE_UNUSED)
3471 {
3472 /* There is no way to determine the return address
3473 if frameaddr is the frame that has 'count' steps
3474 up from current frame. */
3475 if (count != 0)
3476 return NULL_RTX;
3477
3478 /* If count == 0, it means we are at current frame,
3479 the return address is $r30 ($lp). */
3480 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
3481 }
3482
3483 /* -- Eliminating Frame Pointer and Arg Pointer. */
3484
3485 HOST_WIDE_INT
3486 nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
3487 {
3488 HOST_WIDE_INT offset;
3489
3490 /* Compute and setup stack frame size.
3491 The result will be in cfun->machine. */
3492 nds32_compute_stack_frame ();
3493
3494 /* Remember to consider
3495 cfun->machine->callee_saved_area_padding_bytes
3496 when calculating offset. */
3497 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
3498 {
3499 offset = (cfun->machine->fp_size
3500 + cfun->machine->gp_size
3501 + cfun->machine->lp_size
3502 + cfun->machine->callee_saved_regs_size
3503 + cfun->machine->callee_saved_area_padding_bytes
3504 + cfun->machine->local_size
3505 + cfun->machine->out_args_size);
3506 }
3507 else if (from_reg == ARG_POINTER_REGNUM
3508 && to_reg == HARD_FRAME_POINTER_REGNUM)
3509 {
3510 offset = 0;
3511 }
3512 else if (from_reg == FRAME_POINTER_REGNUM
3513 && to_reg == STACK_POINTER_REGNUM)
3514 {
3515 offset = (cfun->machine->local_size + cfun->machine->out_args_size);
3516 }
3517 else if (from_reg == FRAME_POINTER_REGNUM
3518 && to_reg == HARD_FRAME_POINTER_REGNUM)
3519 {
3520 offset = (-1) * (cfun->machine->fp_size
3521 + cfun->machine->gp_size
3522 + cfun->machine->lp_size
3523 + cfun->machine->callee_saved_regs_size
3524 + cfun->machine->callee_saved_area_padding_bytes);
3525 }
3526 else
3527 {
3528 gcc_unreachable ();
3529 }
3530
3531 return offset;
3532 }
3533
3534 /* -- Passing Arguments in Registers. */
3535
3536 void
3537 nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
3538 tree fntype ATTRIBUTE_UNUSED,
3539 rtx libname ATTRIBUTE_UNUSED,
3540 tree fndecl ATTRIBUTE_UNUSED,
3541 int n_named_args ATTRIBUTE_UNUSED)
3542 {
3543 /* Initial available registers
3544 (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
3545 for passing arguments. */
3546 cum->reg_offset = 0;
3547 }
3548
3549 /* -- Function Entry and Exit. */
3550
/* Function for normal multiple push prologue.
   Expand the prologue rtl:  compute the frame layout, emit one
   multiple-push covering $fp/$gp/$lp and the callee-saved registers,
   point $fp at the top of that save area when a frame pointer is
   needed, then move $sp down over the locals/outgoing-argument area.  */
void
nds32_expand_prologue (void)
{
  int fp_adjust;
  int sp_adjust;
  int en4_const;

  rtx Rb, Re;
  rtx push_insn;
  rtx fp_adjust_insn, sp_adjust_insn;

  /* Before computing everything for stack frame size,
     we check if it is still worth to use fp_as_gp optimization.
     If it is, the 'df_regs_ever_live_p (FP_REGNUM)' will be set
     so that $fp will be saved on stack.  */
  cfun->machine->fp_as_gp_p = nds32_fp_as_gp_check_available ();

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* If the function is 'naked',
     we do not have to generate prologue code fragment.  */
  if (cfun->machine->naked_p)
    return;

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);

  /* push_insn = gen_stack_push_multiple(first_regno, last_regno),
     the pattern 'stack_push_multiple' is implemented in nds32.md.
     For En4 field, we have to calculate its constant value:
     the 8/4/2 contributions select $fp/$gp/$lp respectively.
     Refer to Andes ISA for more information.  */
  en4_const = 0;
  if (cfun->machine->fp_size)
    en4_const += 8;
  if (cfun->machine->gp_size)
    en4_const += 4;
  if (cfun->machine->lp_size)
    en4_const += 2;

  /* If $fp, $gp, $lp, and all callee-save registers are NOT required
     to be saved, we don't have to create multiple push instruction.
     Otherwise, a multiple push instruction is needed.  */
  if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
    {
      /* Create multiple push instruction rtx.  */
      push_insn = nds32_gen_stack_push_multiple (Rb, Re, GEN_INT (en4_const));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      push_insn = emit_insn (push_insn);

      /* The insn rtx 'push_insn' will change frame layout.
         We need to use RTX_FRAME_RELATED_P so that GCC is able to
         generate CFI (Call Frame Information) stuff.  */
      RTX_FRAME_RELATED_P (push_insn) = 1;
    }

  /* Check frame_pointer_needed to see
     if we shall emit fp adjustment instruction.  */
  if (frame_pointer_needed)
    {
      /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
                          + (4 * callee-saved-registers)
         Note: No need to adjust
               cfun->machine->callee_saved_area_padding_bytes,
               because, at this point, stack pointer is just
               at the position after push instruction.  */
      fp_adjust = cfun->machine->fp_size
                  + cfun->machine->gp_size
                  + cfun->machine->lp_size
                  + cfun->machine->callee_saved_regs_size;
      fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
                                   stack_pointer_rtx,
                                   GEN_INT (fp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      fp_adjust_insn = emit_insn (fp_adjust_insn);
    }

  /* Adjust $sp = $sp - local_size - out_args_size
                      - callee_saved_area_padding_bytes.  */
  sp_adjust = cfun->machine->local_size
              + cfun->machine->out_args_size
              + cfun->machine->callee_saved_area_padding_bytes;
  /* sp_adjust value may be out of range of the addi instruction,
     create alternative add behavior with TA_REGNUM if necessary,
     using NEGATIVE value to tell that we are decreasing address.  */
  sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
  if (sp_adjust)
    {
      /* Generate sp adjustment instruction if and only if sp_adjust != 0.  */
      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
                                   stack_pointer_rtx,
                                   GEN_INT (-1 * sp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      sp_adjust_insn = emit_insn (sp_adjust_insn);

      /* The insn rtx 'sp_adjust_insn' will change frame layout.
         We need to use RTX_FRAME_RELATED_P so that GCC is able to
         generate CFI (Call Frame Information) stuff.  */
      RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
    }

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());
}
3659
/* Function for normal multiple pop epilogue.
   Expand the epilogue rtl:  move $sp back to the bottom of the
   register save area (computed from $fp when one exists, otherwise by
   frame-size arithmetic), pop the saved registers, and emit the
   return instruction.  Mirrors nds32_expand_prologue.  */
void
nds32_expand_epilogue (void)
{
  int sp_adjust;
  int en4_const;

  rtx Rb, Re;
  rtx pop_insn;
  rtx sp_adjust_insn;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* Generate return instruction by using
         unspec_volatile_func_return pattern.
         Make sure this instruction is after gen_blockage().
         NOTE that $lp will become 'live'
         after this instruction has been emitted.  */
      emit_insn (gen_unspec_volatile_func_return ());
      return;
    }

  if (frame_pointer_needed)
    {
      /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
                          - (4 * callee-saved-registers)
         Note: No need to adjust
               cfun->machine->callee_saved_area_padding_bytes,
               because we want to adjust stack pointer
               to the position for pop instruction.  */
      sp_adjust = cfun->machine->fp_size
                  + cfun->machine->gp_size
                  + cfun->machine->lp_size
                  + cfun->machine->callee_saved_regs_size;
      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
                                   hard_frame_pointer_rtx,
                                   GEN_INT (-1 * sp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      sp_adjust_insn = emit_insn (sp_adjust_insn);
    }
  else
    {
      /* If frame pointer is NOT needed,
         we cannot calculate the sp adjustment from frame pointer.
         Instead, we calculate the adjustment by local_size,
         out_args_size, and callee_saved_area_padding_bytes.
         Notice that such sp adjustment value may be out of range,
         so we have to deal with it as well.  */

      /* Adjust $sp = $sp + local_size + out_args_size
                          + callee_saved_area_padding_bytes.  */
      sp_adjust = cfun->machine->local_size
                  + cfun->machine->out_args_size
                  + cfun->machine->callee_saved_area_padding_bytes;
      /* sp_adjust value may be out of range of the addi instruction,
         create alternative add behavior with TA_REGNUM if necessary,
         using POSITIVE value to tell that we are increasing address.  */
      sp_adjust = nds32_force_addi_stack_int (sp_adjust);
      if (sp_adjust)
        {
          /* Generate sp adjustment instruction
             if and only if sp_adjust != 0.  */
          sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
                                       stack_pointer_rtx,
                                       GEN_INT (sp_adjust));
          /* Emit rtx into instructions list and receive INSN rtx form.  */
          sp_adjust_insn = emit_insn (sp_adjust_insn);
        }
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);

  /* pop_insn = gen_stack_pop_multiple(first_regno, last_regno),
     the pattern 'stack_pop_multiple' is implemented in nds32.md.
     For En4 field, we have to calculate its constant value:
     the 8/4/2 contributions select $fp/$gp/$lp respectively.
     Refer to Andes ISA for more information.  */
  en4_const = 0;
  if (cfun->machine->fp_size)
    en4_const += 8;
  if (cfun->machine->gp_size)
    en4_const += 4;
  if (cfun->machine->lp_size)
    en4_const += 2;

  /* If $fp, $gp, $lp, and all callee-save registers are NOT required
     to be saved, we don't have to create multiple pop instruction.
     Otherwise, a multiple pop instruction is needed.  */
  if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
    {
      /* Create multiple pop instruction rtx.  */
      pop_insn = nds32_gen_stack_pop_multiple (Rb, Re, GEN_INT (en4_const));
      /* Emit pop instruction.  */
      emit_insn (pop_insn);
    }

  /* Generate return instruction by using
     unspec_volatile_func_return pattern.  */
  emit_insn (gen_unspec_volatile_func_return ());
}
3771
/* Function for v3push prologue.
   Expand the prologue rtl using the v3push instruction, which pushes
   $fp/$gp/$lp plus the callee-saved registers and can optionally fold
   an 8-bit, double-word-aligned $sp decrement into the same insn.
   When the required decrement does not fit imm8u, 'push25 Re,0' is
   used and a separate addi adjusts $sp afterwards.  */
void
nds32_expand_prologue_v3push (void)
{
  int fp_adjust;
  int sp_adjust;

  rtx Rb, Re;
  rtx push_insn;
  rtx fp_adjust_insn, sp_adjust_insn;

  /* Before computing everything for stack frame size,
     we check if it is still worth to use fp_as_gp optimization.
     If it is, the 'df_regs_ever_live_p (FP_REGNUM)' will be set
     so that $fp will be saved on stack.  */
  cfun->machine->fp_as_gp_p = nds32_fp_as_gp_check_available ();

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* If the function is 'naked',
     we do not have to generate prologue code fragment.  */
  if (cfun->machine->naked_p)
    return;

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);

  /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
              + cfun->machine->out_args_size
              + cfun->machine->callee_saved_area_padding_bytes;

  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
    {
      /* We can use 'push25 Re,imm8u'.  */

      /* push_insn = gen_stack_v3push(last_regno, sp_adjust),
         the pattern 'stack_v3push' is implemented in nds32.md.
         The (const_int 14) means v3push always push { $fp $gp $lp }.  */
      push_insn = nds32_gen_stack_v3push (Rb, Re,
                                          GEN_INT (14), GEN_INT (sp_adjust));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      push_insn = emit_insn (push_insn);

      /* The insn rtx 'push_insn' will change frame layout.
         We need to use RTX_FRAME_RELATED_P so that GCC is able to
         generate CFI (Call Frame Information) stuff.  */
      RTX_FRAME_RELATED_P (push_insn) = 1;

      /* Check frame_pointer_needed to see
         if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
        {
          /* adjust $fp = $sp   + 4 ($fp size)
                                + 4 ($gp size)
                                + 4 ($lp size)
                                + (4 * n) (callee-saved registers)
                                + sp_adjust ('push25 Re,imm8u')
             Note: Since we use 'push25 Re,imm8u',
                   the position of stack pointer is further
                   changed after push instruction.
                   Hence, we need to take sp_adjust value
                   into consideration.  */
          fp_adjust = cfun->machine->fp_size
                      + cfun->machine->gp_size
                      + cfun->machine->lp_size
                      + cfun->machine->callee_saved_regs_size
                      + sp_adjust;
          fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
                                       stack_pointer_rtx,
                                       GEN_INT (fp_adjust));
          /* Emit rtx into instructions list and receive INSN rtx form.  */
          fp_adjust_insn = emit_insn (fp_adjust_insn);
        }
    }
  else
    {
      /* We have to use 'push25 Re,0' and
         expand one more instruction to adjust $sp later.  */

      /* push_insn = gen_stack_v3push(last_regno, sp_adjust),
         the pattern 'stack_v3push' is implemented in nds32.md.
         The (const_int 14) means v3push always push { $fp $gp $lp }.  */
      push_insn = nds32_gen_stack_v3push (Rb, Re,
                                          GEN_INT (14), GEN_INT (0));
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      push_insn = emit_insn (push_insn);

      /* The insn rtx 'push_insn' will change frame layout.
         We need to use RTX_FRAME_RELATED_P so that GCC is able to
         generate CFI (Call Frame Information) stuff.  */
      RTX_FRAME_RELATED_P (push_insn) = 1;

      /* Check frame_pointer_needed to see
         if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
        {
          /* adjust $fp = $sp + 4 ($fp size)
                              + 4 ($gp size)
                              + 4 ($lp size)
                              + (4 * n) (callee-saved registers)
             Note: Since we use 'push25 Re,0',
                   the stack pointer is just at the position
                   after push instruction.
                   No need to take sp_adjust into consideration.  */
          fp_adjust = cfun->machine->fp_size
                      + cfun->machine->gp_size
                      + cfun->machine->lp_size
                      + cfun->machine->callee_saved_regs_size;
          fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
                                       stack_pointer_rtx,
                                       GEN_INT (fp_adjust));
          /* Emit rtx into instructions list and receive INSN rtx form.  */
          fp_adjust_insn = emit_insn (fp_adjust_insn);
        }

      /* Because we use 'push25 Re,0',
         we need to expand one more instruction to adjust $sp.
         However, sp_adjust value may be out of range of the addi
         instruction, create alternative add behavior with TA_REGNUM
         if necessary, using NEGATIVE value to tell that we are
         decreasing address.  */
      sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
      if (sp_adjust)
        {
          /* Generate sp adjustment instruction
             if and only if sp_adjust != 0.  */
          sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
                                       stack_pointer_rtx,
                                       GEN_INT (-1 * sp_adjust));
          /* Emit rtx into instructions list and receive INSN rtx form.  */
          sp_adjust_insn = emit_insn (sp_adjust_insn);

          /* The insn rtx 'sp_adjust_insn' will change frame layout.
             We need to use RTX_FRAME_RELATED_P so that GCC is able to
             generate CFI (Call Frame Information) stuff.  */
          RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
        }
    }

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());
}
3920
/* Function for v3pop epilogue.
   Expand the epilogue for a function whose prologue used 'push25'
   (v3push).  Emits either a single 'pop25 Re,imm8u' that restores the
   callee-saved registers AND releases the local area in one insn, or a
   separate $sp adjustment followed by 'pop25 Re,0' when the immediate
   does not fit / is misaligned / alloca was used.  */
void
nds32_expand_epilogue_v3pop (void)
{
  int sp_adjust;

  rtx Rb, Re;
  rtx pop_insn;
  rtx sp_adjust_insn;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* Generate return instruction by using
	 unspec_volatile_func_return pattern.
	 Make sure this instruction is after gen_blockage().
	 NOTE that $lp will become 'live'
	 after this instruction has been emitted.  */
      emit_insn (gen_unspec_volatile_func_return ());
      return;
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
  Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);

  /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
	      + cfun->machine->out_args_size
	      + cfun->machine->callee_saved_area_padding_bytes;

  /* We have to consider alloca issue as well.
     If the function does call alloca(), the stack pointer is not fixed.
     In that case, we cannot use 'pop25 Re,imm8u' directly.
     We have to calculate stack pointer from frame pointer
     and then use 'pop25 Re,0'.
     Of course, the frame_pointer_needed should be nonzero
     if the function calls alloca().  */
  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
      && !cfun->calls_alloca)
    {
      /* We can use 'pop25 Re,imm8u'.  */

      /* pop_insn = gen_stack_v3pop(last_regno, sp_adjust),
	 the pattern 'stack_v3pop' is implemented in nds32.md.
	 The (const_int 14) means v3pop always pop { $fp $gp $lp }.  */
      pop_insn = nds32_gen_stack_v3pop (Rb, Re,
					GEN_INT (14), GEN_INT (sp_adjust));

      /* Emit pop instruction.  */
      emit_insn (pop_insn);
    }
  else
    {
      /* We have to use 'pop25 Re,0', and prior to it,
	 we must expand one more instruction to adjust $sp.  */

      if (frame_pointer_needed)
	{
	  /* adjust $sp = $fp - 4 ($fp size)
			      - 4 ($gp size)
			      - 4 ($lp size)
			      - (4 * n) (callee-saved registers)
	     Note: No need to adjust
		   cfun->machine->callee_saved_area_padding_bytes,
		   because we want to adjust stack pointer
		   to the position for pop instruction.  */
	  sp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_regs_size;
	  sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
				       hard_frame_pointer_rtx,
				       GEN_INT (-1 * sp_adjust));
	  /* Emit rtx into instructions list and receive INSN rtx form.  */
	  sp_adjust_insn = emit_insn (sp_adjust_insn);
	}
      else
	{
	  /* If frame pointer is NOT needed,
	     we cannot calculate the sp adjustment from frame pointer.
	     Instead, we calculate the adjustment by local_size,
	     out_args_size, and callee_saved_area_padding_bytes.
	     Notice that such sp adjustment value may be out of range,
	     so we have to deal with it as well.  */

	  /* Adjust $sp = $sp + local_size + out_args_size
			      + callee_saved_area_padding_bytes.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size
		      + cfun->machine->callee_saved_area_padding_bytes;
	  /* sp_adjust value may be out of range of the addi instruction,
	     create alternative add behavior with TA_REGNUM if necessary,
	     using POSITIVE value to tell that we are increasing address.  */
	  sp_adjust = nds32_force_addi_stack_int (sp_adjust);
	  if (sp_adjust)
	    {
	      /* Generate sp adjustment instruction
		 if and only if sp_adjust != 0.  */
	      sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
					   stack_pointer_rtx,
					   GEN_INT (sp_adjust));
	      /* Emit rtx into instructions list and receive INSN rtx form.  */
	      sp_adjust_insn = emit_insn (sp_adjust_insn);
	    }
	}

      /* pop_insn = gen_stack_v3pop(last_regno, sp_adjust),
	 the pattern 'stack_v3pop' is implemented in nds32.md.  */
      /* The (const_int 14) means v3pop always pop { $fp $gp $lp }.  */
      pop_insn = nds32_gen_stack_v3pop (Rb, Re,
					GEN_INT (14), GEN_INT (0));

      /* Emit pop instruction.  */
      emit_insn (pop_insn);
    }
}
4049
4050 /* ------------------------------------------------------------------------ */
4051
4052 /* Function to test 333-form for load/store instructions.
4053 This is auxiliary extern function for auxiliary macro in nds32.h.
4054 Because it is a little complicated, we use function instead of macro. */
4055 bool
4056 nds32_ls_333_p (rtx rt, rtx ra, rtx imm, enum machine_mode mode)
4057 {
4058 if (REGNO_REG_CLASS (REGNO (rt)) == LOW_REGS
4059 && REGNO_REG_CLASS (REGNO (ra)) == LOW_REGS)
4060 {
4061 if (GET_MODE_SIZE (mode) == 4)
4062 return satisfies_constraint_Iu05 (imm);
4063
4064 if (GET_MODE_SIZE (mode) == 2)
4065 return satisfies_constraint_Iu04 (imm);
4066
4067 if (GET_MODE_SIZE (mode) == 1)
4068 return satisfies_constraint_Iu03 (imm);
4069 }
4070
4071 return false;
4072 }
4073
4074
4075 /* Functions to expand load_multiple and store_multiple.
4076 They are auxiliary extern functions to help create rtx template.
4077 Check nds32-multiple.md file for the patterns. */
4078 rtx
4079 nds32_expand_load_multiple (int base_regno, int count,
4080 rtx base_addr, rtx basemem)
4081 {
4082 int par_index;
4083 int offset;
4084 rtx result;
4085 rtx new_addr, mem, reg;
4086
4087 /* Create the pattern that is presented in nds32-multiple.md. */
4088
4089 result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
4090
4091 for (par_index = 0; par_index < count; par_index++)
4092 {
4093 offset = par_index * 4;
4094 /* 4-byte for loading data to each register. */
4095 new_addr = plus_constant (Pmode, base_addr, offset);
4096 mem = adjust_automodify_address_nv (basemem, SImode,
4097 new_addr, offset);
4098 reg = gen_rtx_REG (SImode, base_regno + par_index);
4099
4100 XVECEXP (result, 0, par_index) = gen_rtx_SET (VOIDmode, reg, mem);
4101 }
4102
4103 return result;
4104 }
4105
4106 rtx
4107 nds32_expand_store_multiple (int base_regno, int count,
4108 rtx base_addr, rtx basemem)
4109 {
4110 int par_index;
4111 int offset;
4112 rtx result;
4113 rtx new_addr, mem, reg;
4114
4115 /* Create the pattern that is presented in nds32-multiple.md. */
4116
4117 result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
4118
4119 for (par_index = 0; par_index < count; par_index++)
4120 {
4121 offset = par_index * 4;
4122 /* 4-byte for storing data to memory. */
4123 new_addr = plus_constant (Pmode, base_addr, offset);
4124 mem = adjust_automodify_address_nv (basemem, SImode,
4125 new_addr, offset);
4126 reg = gen_rtx_REG (SImode, base_regno + par_index);
4127
4128 XVECEXP (result, 0, par_index) = gen_rtx_SET (VOIDmode, mem, reg);
4129 }
4130
4131 return result;
4132 }
4133
4134 /* Function to move block memory content by
4135 using load_multiple and store_multiple.
4136 This is auxiliary extern function to help create rtx template.
4137 Check nds32-multiple.md file for the patterns. */
4138 int
4139 nds32_expand_movmemqi (rtx dstmem, rtx srcmem, rtx total_bytes, rtx alignment)
4140 {
4141 HOST_WIDE_INT in_words, out_words;
4142 rtx dst_base_reg, src_base_reg;
4143 int maximum_bytes;
4144
4145 /* Because reduced-set regsiters has few registers
4146 (r0~r5, r6~10, r15, r28~r31, where 'r15' and 'r28~r31'
4147 cannot be used for register allocation),
4148 using 8 registers (32 bytes) for moving memory block
4149 may easily consume all of them.
4150 It makes register allocation/spilling hard to work.
4151 So we only allow maximum=4 registers (16 bytes) for
4152 moving memory block under reduced-set registers. */
4153 if (TARGET_REDUCED_REGS)
4154 maximum_bytes = 16;
4155 else
4156 maximum_bytes = 32;
4157
4158 /* 1. Total_bytes is integer for sure.
4159 2. Alignment is integer for sure.
4160 3. Maximum 4 or 8 registers, 4 * 4 = 16 bytes, 8 * 4 = 32 bytes.
4161 4. Requires (n * 4) block size.
4162 5. Requires 4-byte alignment. */
4163 if (GET_CODE (total_bytes) != CONST_INT
4164 || GET_CODE (alignment) != CONST_INT
4165 || INTVAL (total_bytes) > maximum_bytes
4166 || INTVAL (total_bytes) & 3
4167 || INTVAL (alignment) & 3)
4168 return 0;
4169
4170 dst_base_reg = copy_to_mode_reg (SImode, XEXP (dstmem, 0));
4171 src_base_reg = copy_to_mode_reg (SImode, XEXP (srcmem, 0));
4172
4173 out_words = in_words = INTVAL (total_bytes) / UNITS_PER_WORD;
4174
4175 emit_insn (nds32_expand_load_multiple (0, in_words, src_base_reg, srcmem));
4176 emit_insn (nds32_expand_store_multiple (0, out_words, dst_base_reg, dstmem));
4177
4178 /* Successfully create patterns, return 1. */
4179 return 1;
4180 }
4181
4182 /* Function to check whether the OP is a valid load/store operation.
4183 This is a helper function for the predicates:
4184 'nds32_load_multiple_operation' and 'nds32_store_multiple_operation'
4185 in predicates.md file.
4186
4187 The OP is supposed to be a parallel rtx.
4188 For each element within this parallel rtx:
4189 (set (reg) (mem addr)) is the form for load operation.
4190 (set (mem addr) (reg)) is the form for store operation.
4191 We have to extract reg and mem of every element and
4192 check if the information is valid for multiple load/store operation. */
4193 bool
4194 nds32_valid_multiple_load_store (rtx op, bool load_p)
4195 {
4196 int count;
4197 int first_elt_regno;
4198 rtx elt;
4199
4200 /* Get the counts of elements in the parallel rtx. */
4201 count = XVECLEN (op, 0);
4202 /* Pick up the first element. */
4203 elt = XVECEXP (op, 0, 0);
4204
4205 /* Perform some quick check for the first element in the parallel rtx. */
4206 if (GET_CODE (elt) != SET
4207 || count <= 1
4208 || count > 8)
4209 return false;
4210
4211 /* Pick up regno of first element for further detail checking.
4212 Note that the form is different between load and store operation. */
4213 if (load_p)
4214 {
4215 if (GET_CODE (SET_DEST (elt)) != REG
4216 || GET_CODE (SET_SRC (elt)) != MEM)
4217 return false;
4218
4219 first_elt_regno = REGNO (SET_DEST (elt));
4220 }
4221 else
4222 {
4223 if (GET_CODE (SET_SRC (elt)) != REG
4224 || GET_CODE (SET_DEST (elt)) != MEM)
4225 return false;
4226
4227 first_elt_regno = REGNO (SET_SRC (elt));
4228 }
4229
4230 /* Perform detail check for each element.
4231 Refer to nds32-multiple.md for more information
4232 about following checking.
4233 The starting element of parallel rtx is index 0. */
4234 if (!nds32_consecutive_registers_load_store_p (op, load_p, 0,
4235 first_elt_regno,
4236 count))
4237 return false;
4238
4239 /* Pass all test, this is a valid rtx. */
4240 return true;
4241 }
4242
/* Function to check whether the OP is a valid stack push/pop operation.
   For a valid stack operation, it must satisfy following conditions:
     1. Consecutive registers push/pop operations.
     2. Valid $fp/$gp/$lp push/pop operations.
     3. The last element must be stack adjustment rtx.
   See the prologue/epilogue implementation for details.
   OP is a parallel rtx; PUSH_P selects between the push (store) and
   pop (load) layouts described in the big comment below.  */
bool
nds32_valid_stack_push_pop (rtx op, bool push_p)
{
  int index;
  int total_count;
  int rest_count;
  int first_regno;
  rtx elt;
  rtx elt_reg;
  rtx elt_mem;
  rtx elt_plus;

  /* Get the counts of elements in the parallel rtx.  */
  total_count = XVECLEN (op, 0);

  /* Perform some quick check for that every element should be 'set'.  */
  for (index = 0; index < total_count; index++)
    {
      elt = XVECEXP (op, 0, index);
      if (GET_CODE (elt) != SET)
	return false;
    }

  /* For push operation, the parallel rtx looks like:
     (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
		     (reg:SI Rb))
		(set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
		     (reg:SI Rb+1))
		...
		(set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
		     (reg:SI Re))
		(set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
		     (reg:SI FP_REGNUM))
		(set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
		     (reg:SI GP_REGNUM))
		(set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
		     (reg:SI LP_REGNUM))
		(set (reg:SI SP_REGNUM)
		     (plus (reg:SI SP_REGNUM) (const_int -32)))])

     For pop operation, the parallel rtx looks like:
     (parallel [(set (reg:SI Rb)
		     (mem (reg:SI SP_REGNUM)))
		(set (reg:SI Rb+1)
		     (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
		...
		(set (reg:SI Re)
		     (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
		(set (reg:SI FP_REGNUM)
		     (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
		(set (reg:SI GP_REGNUM)
		     (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
		(set (reg:SI LP_REGNUM)
		     (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
		(set (reg:SI SP_REGNUM)
		     (plus (reg:SI SP_REGNUM) (const_int 32)))])  */

  /* 1. Consecutive registers push/pop operations.
	We need to calculate how many registers should be consecutive.
	The $sp adjustment rtx, $fp push rtx, $gp push rtx,
	and $lp push rtx are excluded.  */

  /* Exclude last $sp adjustment rtx.  */
  rest_count = total_count - 1;
  /* Exclude $fp, $gp, and $lp if they are in the parallel rtx.  */
  if (cfun->machine->fp_size)
    rest_count--;
  if (cfun->machine->gp_size)
    rest_count--;
  if (cfun->machine->lp_size)
    rest_count--;

  if (rest_count > 0)
    {
      elt = XVECEXP (op, 0, 0);
      /* Pick up register element.  For a push the register is the
	 source; for a pop it is the destination.  */
      elt_reg = push_p ? SET_SRC (elt) : SET_DEST (elt);
      first_regno = REGNO (elt_reg);

      /* The 'push' operation is a kind of store operation.
	 The 'pop' operation is a kind of load operation.
	 Pass corresponding false/true as second argument (bool load_p).
	 The par_index is supposed to start with index 0.  */
      if (!nds32_consecutive_registers_load_store_p (op,
						     !push_p ? true : false,
						     0,
						     first_regno,
						     rest_count))
	return false;
    }

  /* 2. Valid $fp/$gp/$lp push/pop operations.
	Remember to set start index for checking them.  */

  /* The rest_count is the start index for checking $fp/$gp/$lp.  */
  index = rest_count;
  /* If index < 0, this parallel rtx is definitely
     not a valid stack push/pop operation.  */
  if (index < 0)
    return false;

  /* Check $fp/$gp/$lp one by one.
     We use 'push_p' to pick up reg rtx and mem rtx.
     The three checks below are intentionally identical except for the
     expected hard register number.  */
  if (cfun->machine->fp_size)
    {
      elt = XVECEXP (op, 0, index);
      elt_mem = push_p ? SET_DEST (elt) : SET_SRC (elt);
      elt_reg = push_p ? SET_SRC (elt) : SET_DEST (elt);
      index++;

      if (GET_CODE (elt_mem) != MEM
	  || GET_CODE (elt_reg) != REG
	  || REGNO (elt_reg) != FP_REGNUM)
	return false;
    }
  if (cfun->machine->gp_size)
    {
      elt = XVECEXP (op, 0, index);
      elt_mem = push_p ? SET_DEST (elt) : SET_SRC (elt);
      elt_reg = push_p ? SET_SRC (elt) : SET_DEST (elt);
      index++;

      if (GET_CODE (elt_mem) != MEM
	  || GET_CODE (elt_reg) != REG
	  || REGNO (elt_reg) != GP_REGNUM)
	return false;
    }
  if (cfun->machine->lp_size)
    {
      elt = XVECEXP (op, 0, index);
      elt_mem = push_p ? SET_DEST (elt) : SET_SRC (elt);
      elt_reg = push_p ? SET_SRC (elt) : SET_DEST (elt);
      index++;

      if (GET_CODE (elt_mem) != MEM
	  || GET_CODE (elt_reg) != REG
	  || REGNO (elt_reg) != LP_REGNUM)
	return false;
    }

  /* 3. The last element must be stack adjustment rtx.
	Its form of rtx should be:
	  (set (reg:SI SP_REGNUM)
	       (plus (reg:SI SP_REGNUM) (const_int X)))
	The X could be positive or negative value.  */

  /* Pick up the last element.  */
  elt = XVECEXP (op, 0, total_count - 1);

  /* Extract its destination and source rtx.  */
  elt_reg = SET_DEST (elt);
  elt_plus = SET_SRC (elt);

  /* Check this is (set (stack_reg) (plus stack_reg const)) pattern.  */
  if (GET_CODE (elt_reg) != REG
      || GET_CODE (elt_plus) != PLUS
      || REGNO (elt_reg) != SP_REGNUM)
    return false;

  /* Pass all test, this is a valid rtx.  */
  return true;
}
4411
4412 /* Computing the Length of an Insn.
4413 Modifies the length assigned to instruction INSN.
4414 LEN is the initially computed length of the insn. */
4415 int
4416 nds32_adjust_insn_length (rtx insn, int length)
4417 {
4418 rtx src, dst;
4419
4420 switch (recog_memoized (insn))
4421 {
4422 case CODE_FOR_move_df:
4423 case CODE_FOR_move_di:
4424 /* Adjust length of movd44 to 2. */
4425 src = XEXP (PATTERN (insn), 1);
4426 dst = XEXP (PATTERN (insn), 0);
4427
4428 if (REG_P (src)
4429 && REG_P (dst)
4430 && (REGNO (src) % 2) == 0
4431 && (REGNO (dst) % 2) == 0)
4432 length = 2;
4433 break;
4434
4435 default:
4436 break;
4437 }
4438
4439 return length;
4440 }
4441
4442
4443 /* Function to check if 'bclr' instruction can be used with IVAL. */
4444 int
4445 nds32_can_use_bclr_p (int ival)
4446 {
4447 int one_bit_count;
4448
4449 /* Calculate the number of 1-bit of (~ival), if there is only one 1-bit,
4450 it means the original ival has only one 0-bit,
4451 So it is ok to perform 'bclr' operation. */
4452
4453 one_bit_count = popcount_hwi ((unsigned HOST_WIDE_INT) (~ival));
4454
4455 /* 'bclr' is a performance extension instruction. */
4456 return (TARGET_PERF_EXT && (one_bit_count == 1));
4457 }
4458
4459 /* Function to check if 'bset' instruction can be used with IVAL. */
4460 int
4461 nds32_can_use_bset_p (int ival)
4462 {
4463 int one_bit_count;
4464
4465 /* Caculate the number of 1-bit of ival, if there is only one 1-bit,
4466 it is ok to perform 'bset' operation. */
4467
4468 one_bit_count = popcount_hwi ((unsigned HOST_WIDE_INT) (ival));
4469
4470 /* 'bset' is a performance extension instruction. */
4471 return (TARGET_PERF_EXT && (one_bit_count == 1));
4472 }
4473
4474 /* Function to check if 'btgl' instruction can be used with IVAL. */
4475 int
4476 nds32_can_use_btgl_p (int ival)
4477 {
4478 int one_bit_count;
4479
4480 /* Caculate the number of 1-bit of ival, if there is only one 1-bit,
4481 it is ok to perform 'btgl' operation. */
4482
4483 one_bit_count = popcount_hwi ((unsigned HOST_WIDE_INT) (ival));
4484
4485 /* 'btgl' is a performance extension instruction. */
4486 return (TARGET_PERF_EXT && (one_bit_count == 1));
4487 }
4488
4489 /* Function to check if 'bitci' instruction can be used with IVAL. */
4490 int
4491 nds32_can_use_bitci_p (int ival)
4492 {
4493 /* If we are using V3 ISA, we have 'bitci' instruction.
4494 Try to see if we can present 'andi' semantic with
4495 such 'bit-clear-immediate' operation.
4496 For example, 'andi $r0,$r0,0xfffffffc' can be
4497 presented with 'bitci $r0,$r0,3'. */
4498 return (TARGET_ISA_V3
4499 && (ival < 0)
4500 && satisfies_constraint_Iu15 (gen_int_mode (~ival, SImode)));
4501 }
4502
4503
4504 /* Return true if is load/store with SYMBOL_REF addressing mode
4505 and memory mode is SImode. */
4506 bool
4507 nds32_symbol_load_store_p (rtx insn)
4508 {
4509 rtx mem_src = NULL_RTX;
4510
4511 switch (get_attr_type (insn))
4512 {
4513 case TYPE_LOAD:
4514 mem_src = SET_SRC (PATTERN (insn));
4515 break;
4516 case TYPE_STORE:
4517 mem_src = SET_DEST (PATTERN (insn));
4518 break;
4519 default:
4520 break;
4521 }
4522
4523 /* Find load/store insn with addressing mode is SYMBOL_REF. */
4524 if (mem_src != NULL_RTX)
4525 {
4526 if ((GET_CODE (mem_src) == ZERO_EXTEND)
4527 || (GET_CODE (mem_src) == SIGN_EXTEND))
4528 mem_src = XEXP (mem_src, 0);
4529
4530 if ((GET_CODE (XEXP (mem_src, 0)) == SYMBOL_REF)
4531 || (GET_CODE (XEXP (mem_src, 0)) == LO_SUM))
4532 return true;
4533 }
4534
4535 return false;
4536 }
4537
/* Function to determine whether it is worth to do fp_as_gp optimization.
   Return 0: It is NOT worth to do fp_as_gp optimization.
   Return 1: It is APPROXIMATELY worth to do fp_as_gp optimization.
   Note that if it is worth to do fp_as_gp optimization,
   we MUST set FP_REGNUM ever live in this function.  */
int
nds32_fp_as_gp_check_available (void)
{
  /* If there exists ANY of following conditions,
     we DO NOT perform fp_as_gp optimization:
       1. TARGET_FORBID_FP_AS_GP is set
	  regardless of the TARGET_FORCE_FP_AS_GP.
       2. User explicitly uses 'naked' attribute.
       3. Not optimize for size.
       4. Need frame pointer.
       5. If $fp is already required to be saved,
	  it means $fp is already chosen by register allocator.
	  Thus we better not to use it for fp_as_gp optimization.
       6. This function is a vararg function.
	  DO NOT apply fp_as_gp optimization on this function
	  because it may change and break stack frame.
       7. The epilogue is empty.
	  This happens when the function uses exit()
	  or its attribute is no_return.
	  In that case, compiler will not expand epilogue
	  so that we have no chance to output .omit_fp_end directive.  */
  if (TARGET_FORBID_FP_AS_GP
      || lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
      || !optimize_size
      || frame_pointer_needed
      || NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
      || (cfun->stdarg == 1)
      || (find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == NULL))
    return 0;

  /* Now we can check the possibility of using fp_as_gp optimization.  */
  if (TARGET_FORCE_FP_AS_GP)
    {
      /* User explicitly issues -mforce-fp-as-gp option.  */
      df_set_regs_ever_live (FP_REGNUM, 1);
      return 1;
    }
  else
    {
      /* In the following we are going to evaluate whether
	 it is worth to do fp_as_gp optimization.  */
      int good_gain = 0;
      int symbol_count = 0;

      int threshold;
      rtx insn;

      /* We check if there already requires prologue.
	 Note that $gp will be saved in prologue for PIC code generation.
	 After that, we can set threshold by the existence of prologue.
	 Each fp-implied instruction will gain 2-byte code size
	 from gp-aware instruction, so we have following heuristics.  */
      if (flag_pic
	  || nds32_have_prologue_p ())
	{
	  /* Have-prologue:
	       Compiler already intends to generate prologue content,
	       so the fp_as_gp optimization will only insert
	       'la $fp,_FP_BASE_' instruction, which will be
	       converted into 4-byte instruction at link time.
	       The threshold is "3" symbol accesses, 2 + 2 + 2 > 4.  */
	  threshold = 3;
	}
      else
	{
	  /* None-prologue:
	       Compiler originally does not generate prologue content,
	       so the fp_as_gp optimization will NOT ONLY insert
	       'la $fp,_FP_BASE' instruction, but also causes
	       push/pop instructions.
	       If we are using v3push (push25/pop25),
	       the threshold is "5" symbol accesses, 5*2 > 4 + 2 + 2;
	       If we are using normal push (smw/lmw),
	       the threshold is "5+2" symbol accesses 7*2 > 4 + 4 + 4.  */
	  threshold = 5 + (TARGET_V3PUSH ? 0 : 2);
	}

      /* We would like to traverse every instruction in this function.
	 So we need to have push_topmost_sequence()/pop_topmost_sequence()
	 surrounding our for-loop evaluation.  */
      push_topmost_sequence ();
      /* Counting the insn number which the addressing mode is symbol.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  if (single_set (insn) && nds32_symbol_load_store_p (insn))
	    symbol_count++;

	  /* Stop scanning as soon as the threshold is reached; further
	     accesses can only make the optimization more profitable.  */
	  if (symbol_count == threshold)
	    {
	      good_gain = 1;
	      break;
	    }
	}
      pop_topmost_sequence ();

      /* Enable fp_as_gp optimization when potential gain is good enough.  */
      if (good_gain)
	{
	  df_set_regs_ever_live (FP_REGNUM, 1);
	  return 1;
	}
    }

  /* By default we return 0.  */
  return 0;
}
4649
4650
4651 /* Function to generate PC relative jump table.
4652 Refer to nds32.md for more details.
4653
4654 The following is the sample for the case that diff value
4655 can be presented in '.short' size.
4656
4657 addi $r1, $r1, -(case_lower_bound)
4658 slti $ta, $r1, (case_number)
4659 beqz $ta, .L_skip_label
4660
4661 la $ta, .L35 ! get jump table address
4662 lh $r1, [$ta + $r1 << 1] ! load symbol diff from jump table entry
4663 addi $ta, $r1, $ta
4664 jr5 $ta
4665
4666 ! jump table entry
4667 L35:
4668 .short .L25-.L35
4669 .short .L26-.L35
4670 .short .L27-.L35
4671 .short .L28-.L35
4672 .short .L29-.L35
4673 .short .L30-.L35
4674 .short .L31-.L35
4675 .short .L32-.L35
4676 .short .L33-.L35
4677 .short .L34-.L35 */
4678 const char *
4679 nds32_output_casesi_pc_relative (rtx *operands)
4680 {
4681 enum machine_mode mode;
4682 rtx diff_vec;
4683
4684 diff_vec = PATTERN (NEXT_INSN (operands[1]));
4685
4686 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
4687
4688 /* Step C: "t <-- operands[1]". */
4689 output_asm_insn ("la\t$ta, %l1", operands);
4690
4691 /* Get the mode of each element in the difference vector. */
4692 mode = GET_MODE (diff_vec);
4693
4694 /* Step D: "z <-- (mem (plus (operands[0] << m) t))",
4695 where m is 0, 1, or 2 to load address-diff value from table. */
4696 switch (mode)
4697 {
4698 case QImode:
4699 output_asm_insn ("lb\t%2, [$ta + %0 << 0]", operands);
4700 break;
4701 case HImode:
4702 output_asm_insn ("lh\t%2, [$ta + %0 << 1]", operands);
4703 break;
4704 case SImode:
4705 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
4706 break;
4707 default:
4708 gcc_unreachable ();
4709 }
4710
4711 /* Step E: "t <-- z + t".
4712 Add table label_ref with address-diff value to
4713 obtain target case address. */
4714 output_asm_insn ("add\t$ta, %2, $ta", operands);
4715
4716 /* Step F: jump to target with register t. */
4717 if (TARGET_16_BIT)
4718 return "jr5\t$ta";
4719 else
4720 return "jr\t$ta";
4721 }
4722
4723 /* Function to generate normal jump table. */
4724 const char *
4725 nds32_output_casesi (rtx *operands)
4726 {
4727 /* Step C: "t <-- operands[1]". */
4728 output_asm_insn ("la\t$ta, %l1", operands);
4729
4730 /* Step D: "z <-- (mem (plus (operands[0] << 2) t))". */
4731 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
4732
4733 /* No need to perform Step E, which is only used for
4734 pc relative jump table. */
4735
4736 /* Step F: jump to target with register z. */
4737 if (TARGET_16_BIT)
4738 return "jr5\t%2";
4739 else
4740 return "jr\t%2";
4741 }
4742
4743
/* Function to return memory format.
   Classify the memory operand OP into one of the 16-bit addressing
   formats (45-form, 333-form, fp/sp-implied 37-form) so the output
   routines can pick the matching short instruction, or return
   ADDRESS_NOT_16BIT_FORMAT when only a 32-bit encoding works.
   Note: the checks below are ordered; earlier, more specific forms
   take precedence.  */
enum nds32_16bit_address_type
nds32_mem_format (rtx op)
{
  enum machine_mode mode_test;
  int val;
  int regno;

  /* Without the 16-bit ISA there is nothing to classify.  */
  if (!TARGET_16_BIT)
    return ADDRESS_NOT_16BIT_FORMAT;

  mode_test = GET_MODE (op);

  /* From here on OP is the address inside the MEM.  */
  op = XEXP (op, 0);

  /* 45 format: plain register address for word access.  */
  if (GET_CODE (op) == REG && (mode_test == SImode))
    return ADDRESS_REG;

  /* 333 format for QI/HImode: low register, zero offset.  */
  if (GET_CODE (op) == REG && (REGNO (op) < R8_REGNUM))
    return ADDRESS_LO_REG_IMM3U;

  /* post_inc 333 format: (post_inc (reg)) on a low register.  */
  if ((GET_CODE (op) == POST_INC) && (mode_test == SImode))
    {
      regno = REGNO(XEXP (op, 0));

      if (regno < 8)
	return ADDRESS_POST_INC_LO_REG_IMM3U;
    }

  /* post_inc 333 format: (post_modify (reg) (plus (reg) (const_int)))
     on a low register with a small constant step.  */
  if ((GET_CODE (op) == POST_MODIFY)
      && (mode_test == SImode)
      && (REG_P (XEXP (XEXP (op, 1), 0)))
      && (CONST_INT_P (XEXP (XEXP (op, 1), 1))))
    {
      regno = REGNO (XEXP (XEXP (op, 1), 0));
      val = INTVAL (XEXP (XEXP (op, 1), 1));
      if (regno < 8 && val < 32)
	return ADDRESS_POST_INC_LO_REG_IMM3U;
    }

  /* Base register plus constant offset forms.  */
  if ((GET_CODE (op) == PLUS)
      && (GET_CODE (XEXP (op, 0)) == REG)
      && (GET_CODE (XEXP (op, 1)) == CONST_INT))
    {
      val = INTVAL (XEXP (op, 1));

      regno = REGNO(XEXP (op, 0));

      /* Only low registers (r0-r7), $sp, and $fp can serve as a base
	 in any 16-bit offset form.  */
      if (regno > 7
	  && regno != SP_REGNUM
	  && regno != FP_REGNUM)
	return ADDRESS_NOT_16BIT_FORMAT;

      switch (mode_test)
	{
	case QImode:
	  /* 333 format: unsigned 3-bit byte offset.  */
	  if (val >= 0 && val < 8 && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	case HImode:
	  /* 333 format: even offset, 0..14.  */
	  if (val >= 0 && val < 16 && (val % 2 == 0) && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	case SImode:
	case SFmode:
	case DFmode:
	  /* fp imply 37 format: word-aligned offset 0..508 off $fp.  */
	  if ((regno == FP_REGNUM) &&
	      (val >= 0 && val < 512 && (val % 4 == 0)))
	    return ADDRESS_FP_IMM7U;
	  /* sp imply 37 format: word-aligned offset 0..508 off $sp.  */
	  else if ((regno == SP_REGNUM) &&
		   (val >= 0 && val < 512 && (val % 4 == 0)))
	    return ADDRESS_SP_IMM7U;
	  /* 333 format: word-aligned offset 0..28 off a low register.  */
	  else if (val >= 0 && val < 32 && (val % 4 == 0) && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	default:
	  break;
	}
    }

  return ADDRESS_NOT_16BIT_FORMAT;
}
4838
/* Output 16-bit store.
   Emit the short-form store instruction for OPERANDS
   (operands[0] = mem destination, operands[1] = source register),
   with BYTE being the access size in bytes (1/2/4).
   Always returns "" because the assembly is emitted directly via
   output_asm_insn.  */
const char *
nds32_output_16bit_store (rtx *operands, int byte)
{
  char pattern[100];
  char size;
  rtx code = XEXP (operands[0], 0);

  /* SIZE is the mnemonic size letter derived from BYTE
     (e.g. b/h/w).  */
  size = nds32_byte_to_size (byte);

  switch (nds32_mem_format (operands[0]))
    {
    case ADDRESS_REG:
      /* Replace the mem operand by its address register.  */
      operands[0] = code;
      output_asm_insn ("swi450\t%1, [%0]", operands);
      break;
    case ADDRESS_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "s%ci333\t%%1, %%0", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_INC_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "s%ci333.bi\t%%1, %%0", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_FP_IMM7U:
      output_asm_insn ("swi37\t%1, %0", operands);
      break;
    case ADDRESS_SP_IMM7U:
      /* Get immediate value and set back to operands[0].  */
      operands[0] = XEXP (code, 1);
      output_asm_insn ("swi37.sp\t%1, [ + (%0)]", operands);
      break;
    default:
      /* 32-bit formats are handled elsewhere; emit nothing here.  */
      break;
    }

  return "";
}
4877
/* Output 16-bit load.
   Emit the short-form load instruction for OPERANDS
   (operands[0] = destination register, operands[1] = mem source),
   with BYTE being the access size in bytes (1/2/4).
   Always returns "" because the assembly is emitted directly via
   output_asm_insn.  */
const char *
nds32_output_16bit_load (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code = XEXP (operands[1], 0);

  /* SIZE is the mnemonic size letter derived from BYTE
     (e.g. b/h/w).  */
  size = nds32_byte_to_size (byte);

  switch (nds32_mem_format (operands[1]))
    {
    case ADDRESS_REG:
      /* Replace the mem operand by its address register.  */
      operands[1] = code;
      output_asm_insn ("lwi450\t%0, [%1]", operands);
      break;
    case ADDRESS_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "l%ci333\t%%0, %%1", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_INC_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "l%ci333.bi\t%%0, %%1", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_FP_IMM7U:
      output_asm_insn ("lwi37\t%0, %1", operands);
      break;
    case ADDRESS_SP_IMM7U:
      /* Get immediate value and set back to operands[1].  */
      operands[1] = XEXP (code, 1);
      output_asm_insn ("lwi37.sp\t%0, [ + (%1)]", operands);
      break;
    default:
      /* 32-bit formats are handled elsewhere; emit nothing here.  */
      break;
    }

  return "";
}
4916
/* Output 32-bit store.
   Emit the 32-bit store instruction for OPERANDS
   (operands[0] = mem destination, operands[1] = source register),
   with BYTE being the access size in bytes (1/2/4).  The addressing
   mode of the mem selects the concrete mnemonic variant.
   Always returns "" because the assembly is emitted via
   output_asm_insn.  */
const char *
nds32_output_32bit_store (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code = XEXP (operands[0], 0);

  /* SIZE is the mnemonic size letter derived from BYTE
     (e.g. b/h/w).  */
  size = nds32_byte_to_size (byte);

  switch (GET_CODE (code))
    {
    case REG:
      /* (mem (reg X))
	 => access location by using register,
	 use "sbi / shi / swi" */
      snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
      break;

    case SYMBOL_REF:
    case CONST:
      /* (mem (symbol_ref X))
	 (mem (const (...)))
	 => access global variables,
	 use "sbi.gp / shi.gp / swi.gp" */
      operands[0] = XEXP (operands[0], 0);
      snprintf (pattern, sizeof (pattern), "s%ci.gp\t%%1, [ + %%0]", size);
      break;

    case POST_INC:
      /* (mem (post_inc reg))
	 => access location by using register which will be post increment,
	 use "sbi.bi / shi.bi / swi.bi" */
      snprintf (pattern, sizeof (pattern),
		"s%ci.bi\t%%1, %%0, %d", size, byte);
      break;

    case POST_DEC:
      /* (mem (post_dec reg))
	 => access location by using register which will be post decrement,
	 use "sbi.bi / shi.bi / swi.bi" */
      snprintf (pattern, sizeof (pattern),
		"s%ci.bi\t%%1, %%0, -%d", size, byte);
      break;

    case POST_MODIFY:
      switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (post_modify (reg) (plus (reg) (reg))))
	     => access location by using register which will be
	     post modified with reg,
	     use "sb.bi/ sh.bi / sw.bi" */
	  snprintf (pattern, sizeof (pattern), "s%c.bi\t%%1, %%0", size);
	  break;
	case CONST_INT:
	  /* (mem (post_modify (reg) (plus (reg) (const_int))))
	     => access location by using register which will be
	     post modified with const_int,
	     use "sbi.bi/ shi.bi / swi.bi" */
	  snprintf (pattern, sizeof (pattern), "s%ci.bi\t%%1, %%0", size);
	  break;
	default:
	  /* NOTE(review): GCC style usually prefers gcc_unreachable()
	     over abort() here -- behavior kept as-is.  */
	  abort ();
	}
      break;

    case PLUS:
      switch (GET_CODE (XEXP (code, 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
	     => access location by adding two registers,
	     use "sb / sh / sw" */
	  snprintf (pattern, sizeof (pattern), "s%c\t%%1, %%0", size);
	  break;
	case CONST_INT:
	  /* (mem (plus reg const_int))
	     => access location by adding one register with const_int,
	     use "sbi / shi / swi" */
	  snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
	  break;
	default:
	  abort ();
	}
      break;

    case LO_SUM:
      /* Split the lo_sum address into base register (operands[0])
	 and symbolic part (operands[2]) for the lo12 relocation.  */
      operands[2] = XEXP (code, 1);
      operands[0] = XEXP (code, 0);
      snprintf (pattern, sizeof (pattern),
		"s%ci\t%%1, [%%0 + lo12(%%2)]", size);
      break;

    default:
      abort ();
    }

  output_asm_insn (pattern, operands);
  return "";
}
5020
5021 /* Output 32-bit load. */
5022 const char *
5023 nds32_output_32bit_load (rtx *operands, int byte)
5024 {
5025 char pattern[100];
5026 unsigned char size;
5027 rtx code;
5028
5029 code = XEXP (operands[1], 0);
5030
5031 size = nds32_byte_to_size (byte);
5032
5033 switch (GET_CODE (code))
5034 {
5035 case REG:
5036 /* (mem (reg X))
5037 => access location by using register,
5038 use "lbi / lhi / lwi" */
5039 snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
5040 break;
5041
5042 case SYMBOL_REF:
5043 case CONST:
5044 /* (mem (symbol_ref X))
5045 (mem (const (...)))
5046 => access global variables,
5047 use "lbi.gp / lhi.gp / lwi.gp" */
5048 operands[1] = XEXP (operands[1], 0);
5049 snprintf (pattern, sizeof (pattern), "l%ci.gp\t%%0, [ + %%1]", size);
5050 break;
5051
5052 case POST_INC:
5053 /* (mem (post_inc reg))
5054 => access location by using register which will be post increment,
5055 use "lbi.bi / lhi.bi / lwi.bi" */
5056 snprintf (pattern, sizeof (pattern),
5057 "l%ci.bi\t%%0, %%1, %d", size, byte);
5058 break;
5059
5060 case POST_DEC:
5061 /* (mem (post_dec reg))
5062 => access location by using register which will be post decrement,
5063 use "lbi.bi / lhi.bi / lwi.bi" */
5064 snprintf (pattern, sizeof (pattern),
5065 "l%ci.bi\t%%0, %%1, -%d", size, byte);
5066 break;
5067
5068 case POST_MODIFY:
5069 switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
5070 {
5071 case REG:
5072 case SUBREG:
5073 /* (mem (post_modify (reg) (plus (reg) (reg))))
5074 => access location by using register which will be
5075 post modified with reg,
5076 use "lb.bi/ lh.bi / lw.bi" */
5077 snprintf (pattern, sizeof (pattern), "l%c.bi\t%%0, %%1", size);
5078 break;
5079 case CONST_INT:
5080 /* (mem (post_modify (reg) (plus (reg) (const_int))))
5081 => access location by using register which will be
5082 post modified with const_int,
5083 use "lbi.bi/ lhi.bi / lwi.bi" */
5084 snprintf (pattern, sizeof (pattern), "l%ci.bi\t%%0, %%1", size);
5085 break;
5086 default:
5087 abort ();
5088 }
5089 break;
5090
5091 case PLUS:
5092 switch (GET_CODE (XEXP (code, 1)))
5093 {
5094 case REG:
5095 case SUBREG:
5096 /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
5097 use "lb / lh / lw" */
5098 snprintf (pattern, sizeof (pattern), "l%c\t%%0, %%1", size);
5099 break;
5100 case CONST_INT:
5101 /* (mem (plus reg const_int))
5102 => access location by adding one register with const_int,
5103 use "lbi / lhi / lwi" */
5104 snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
5105 break;
5106 default:
5107 abort ();
5108 }
5109 break;
5110
5111 case LO_SUM:
5112 operands[2] = XEXP (code, 1);
5113 operands[1] = XEXP (code, 0);
5114 snprintf (pattern, sizeof (pattern),
5115 "l%ci\t%%0, [%%1 + lo12(%%2)]", size);
5116 break;
5117
5118 default:
5119 abort ();
5120 }
5121
5122 output_asm_insn (pattern, operands);
5123 return "";
5124 }
5125
5126 /* Output 32-bit load with signed extension. */
5127 const char *
5128 nds32_output_32bit_load_s (rtx *operands, int byte)
5129 {
5130 char pattern[100];
5131 unsigned char size;
5132 rtx code;
5133
5134 code = XEXP (operands[1], 0);
5135
5136 size = nds32_byte_to_size (byte);
5137
5138 switch (GET_CODE (code))
5139 {
5140 case REG:
5141 /* (mem (reg X))
5142 => access location by using register,
5143 use "lbsi / lhsi" */
5144 snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
5145 break;
5146
5147 case SYMBOL_REF:
5148 case CONST:
5149 /* (mem (symbol_ref X))
5150 (mem (const (...)))
5151 => access global variables,
5152 use "lbsi.gp / lhsi.gp" */
5153 operands[1] = XEXP (operands[1], 0);
5154 snprintf (pattern, sizeof (pattern), "l%csi.gp\t%%0, [ + %%1]", size);
5155 break;
5156
5157 case POST_INC:
5158 /* (mem (post_inc reg))
5159 => access location by using register which will be post increment,
5160 use "lbsi.bi / lhsi.bi" */
5161 snprintf (pattern, sizeof (pattern),
5162 "l%csi.bi\t%%0, %%1, %d", size, byte);
5163 break;
5164
5165 case POST_DEC:
5166 /* (mem (post_dec reg))
5167 => access location by using register which will be post decrement,
5168 use "lbsi.bi / lhsi.bi" */
5169 snprintf (pattern, sizeof (pattern),
5170 "l%csi.bi\t%%0, %%1, -%d", size, byte);
5171 break;
5172
5173 case POST_MODIFY:
5174 switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
5175 {
5176 case REG:
5177 case SUBREG:
5178 /* (mem (post_modify (reg) (plus (reg) (reg))))
5179 => access location by using register which will be
5180 post modified with reg,
5181 use "lbs.bi/ lhs.bi" */
5182 snprintf (pattern, sizeof (pattern), "l%cs.bi\t%%0, %%1", size);
5183 break;
5184 case CONST_INT:
5185 /* (mem (post_modify (reg) (plus (reg) (const_int))))
5186 => access location by using register which will be
5187 post modified with const_int,
5188 use "lbsi.bi/ lhsi.bi" */
5189 snprintf (pattern, sizeof (pattern), "l%csi.bi\t%%0, %%1", size);
5190 break;
5191 default:
5192 abort ();
5193 }
5194 break;
5195
5196 case PLUS:
5197 switch (GET_CODE (XEXP (code, 1)))
5198 {
5199 case REG:
5200 case SUBREG:
5201 /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
5202 use "lbs / lhs" */
5203 snprintf (pattern, sizeof (pattern), "l%cs\t%%0, %%1", size);
5204 break;
5205 case CONST_INT:
5206 /* (mem (plus reg const_int))
5207 => access location by adding one register with const_int,
5208 use "lbsi / lhsi" */
5209 snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
5210 break;
5211 default:
5212 abort ();
5213 }
5214 break;
5215
5216 case LO_SUM:
5217 operands[2] = XEXP (code, 1);
5218 operands[1] = XEXP (code, 0);
5219 snprintf (pattern, sizeof (pattern),
5220 "l%csi\t%%0, [%%1 + lo12(%%2)]", size);
5221 break;
5222
5223 default:
5224 abort ();
5225 }
5226
5227 output_asm_insn (pattern, operands);
5228 return "";
5229 }
5230
5231 /* Function to output stack push operation.
5232 We need to deal with normal stack push multiple or stack v3push. */
5233 const char *
5234 nds32_output_stack_push (void)
5235 {
5236 /* A string pattern for output_asm_insn(). */
5237 char pattern[100];
5238 /* The operands array which will be used in output_asm_insn(). */
5239 rtx operands[3];
5240 /* Pick up callee-saved first regno and last regno for further use. */
5241 int rb_regno = cfun->machine->callee_saved_regs_first_regno;
5242 int re_regno = cfun->machine->callee_saved_regs_last_regno;
5243
5244 if (TARGET_V3PUSH)
5245 {
5246 /* For stack v3push:
5247 operands[0]: Re
5248 operands[1]: imm8u */
5249
5250 /* This variable is to check if 'push25 Re,imm8u' is available. */
5251 int sp_adjust;
5252
5253 /* Set operands[0]. */
5254 operands[0] = gen_rtx_REG (SImode, re_regno);
5255
5256 /* Check if we can generate 'push25 Re,imm8u',
5257 otherwise, generate 'push25 Re,0'. */
5258 sp_adjust = cfun->machine->local_size
5259 + cfun->machine->out_args_size
5260 + cfun->machine->callee_saved_area_padding_bytes;
5261 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
5262 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
5263 operands[1] = GEN_INT (sp_adjust);
5264 else
5265 operands[1] = GEN_INT (0);
5266
5267 /* Create assembly code pattern. */
5268 snprintf (pattern, sizeof (pattern), "push25\t%%0, %%1");
5269 }
5270 else
5271 {
5272 /* For normal stack push multiple:
5273 operands[0]: Rb
5274 operands[1]: Re
5275 operands[2]: En4 */
5276
5277 /* This variable is used to check if we only need to generate En4 field.
5278 As long as Rb==Re=SP_REGNUM, we set this variable to 1. */
5279 int push_en4_only_p = 0;
5280
5281 /* Set operands[0] and operands[1]. */
5282 operands[0] = gen_rtx_REG (SImode, rb_regno);
5283 operands[1] = gen_rtx_REG (SImode, re_regno);
5284
5285 /* 'smw.adm $sp,[$sp],$sp,0' means push nothing. */
5286 if (!cfun->machine->fp_size
5287 && !cfun->machine->gp_size
5288 && !cfun->machine->lp_size
5289 && REGNO (operands[0]) == SP_REGNUM
5290 && REGNO (operands[1]) == SP_REGNUM)
5291 {
5292 /* No need to generate instruction. */
5293 return "";
5294 }
5295 else
5296 {
5297 /* If Rb==Re=SP_REGNUM, we only need to generate En4 field. */
5298 if (REGNO (operands[0]) == SP_REGNUM
5299 && REGNO (operands[1]) == SP_REGNUM)
5300 push_en4_only_p = 1;
5301
5302 /* Create assembly code pattern.
5303 We need to handle the form: "Rb, Re, { $fp $gp $lp }". */
5304 snprintf (pattern, sizeof (pattern),
5305 "push.s\t%s{%s%s%s }",
5306 push_en4_only_p ? "" : "%0, %1, ",
5307 cfun->machine->fp_size ? " $fp" : "",
5308 cfun->machine->gp_size ? " $gp" : "",
5309 cfun->machine->lp_size ? " $lp" : "");
5310 }
5311 }
5312
5313 /* We use output_asm_insn() to output assembly code by ourself. */
5314 output_asm_insn (pattern, operands);
5315 return "";
5316 }
5317
5318 /* Function to output stack pop operation.
5319 We need to deal with normal stack pop multiple or stack v3pop. */
5320 const char *
5321 nds32_output_stack_pop (void)
5322 {
5323 /* A string pattern for output_asm_insn(). */
5324 char pattern[100];
5325 /* The operands array which will be used in output_asm_insn(). */
5326 rtx operands[3];
5327 /* Pick up callee-saved first regno and last regno for further use. */
5328 int rb_regno = cfun->machine->callee_saved_regs_first_regno;
5329 int re_regno = cfun->machine->callee_saved_regs_last_regno;
5330
5331 if (TARGET_V3PUSH)
5332 {
5333 /* For stack v3pop:
5334 operands[0]: Re
5335 operands[1]: imm8u */
5336
5337 /* This variable is to check if 'pop25 Re,imm8u' is available. */
5338 int sp_adjust;
5339
5340 /* Set operands[0]. */
5341 operands[0] = gen_rtx_REG (SImode, re_regno);
5342
5343 /* Check if we can generate 'pop25 Re,imm8u',
5344 otherwise, generate 'pop25 Re,0'.
5345 We have to consider alloca issue as well.
5346 If the function does call alloca(), the stack pointer is not fixed.
5347 In that case, we cannot use 'pop25 Re,imm8u' directly.
5348 We have to caculate stack pointer from frame pointer
5349 and then use 'pop25 Re,0'. */
5350 sp_adjust = cfun->machine->local_size
5351 + cfun->machine->out_args_size
5352 + cfun->machine->callee_saved_area_padding_bytes;
5353 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
5354 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
5355 && !cfun->calls_alloca)
5356 operands[1] = GEN_INT (sp_adjust);
5357 else
5358 operands[1] = GEN_INT (0);
5359
5360 /* Create assembly code pattern. */
5361 snprintf (pattern, sizeof (pattern), "pop25\t%%0, %%1");
5362 }
5363 else
5364 {
5365 /* For normal stack pop multiple:
5366 operands[0]: Rb
5367 operands[1]: Re
5368 operands[2]: En4 */
5369
5370 /* This variable is used to check if we only need to generate En4 field.
5371 As long as Rb==Re=SP_REGNUM, we set this variable to 1. */
5372 int pop_en4_only_p = 0;
5373
5374 /* Set operands[0] and operands[1]. */
5375 operands[0] = gen_rtx_REG (SImode, rb_regno);
5376 operands[1] = gen_rtx_REG (SImode, re_regno);
5377
5378 /* 'lmw.bim $sp,[$sp],$sp,0' means pop nothing. */
5379 if (!cfun->machine->fp_size
5380 && !cfun->machine->gp_size
5381 && !cfun->machine->lp_size
5382 && REGNO (operands[0]) == SP_REGNUM
5383 && REGNO (operands[1]) == SP_REGNUM)
5384 {
5385 /* No need to generate instruction. */
5386 return "";
5387 }
5388 else
5389 {
5390 /* If Rb==Re=SP_REGNUM, we only need to generate En4 field. */
5391 if (REGNO (operands[0]) == SP_REGNUM
5392 && REGNO (operands[1]) == SP_REGNUM)
5393 pop_en4_only_p = 1;
5394
5395 /* Create assembly code pattern.
5396 We need to handle the form: "Rb, Re, { $fp $gp $lp }". */
5397 snprintf (pattern, sizeof (pattern),
5398 "pop.s\t%s{%s%s%s }",
5399 pop_en4_only_p ? "" : "%0, %1, ",
5400 cfun->machine->fp_size ? " $fp" : "",
5401 cfun->machine->gp_size ? " $gp" : "",
5402 cfun->machine->lp_size ? " $lp" : "");
5403 }
5404 }
5405
5406 /* We use output_asm_insn() to output assembly code by ourself. */
5407 output_asm_insn (pattern, operands);
5408 return "";
5409 }
5410
5411 /* Return align 2 (log base 2) if the next instruction of LABEL is 4 byte. */
5412 int
5413 nds32_target_alignment (rtx label)
5414 {
5415 rtx insn;
5416
5417 if (optimize_size)
5418 return 0;
5419
5420 insn = next_active_insn (label);
5421
5422 if (insn == 0)
5423 return 0;
5424 else if ((get_attr_length (insn) % 4) == 0)
5425 return 2;
5426 else
5427 return 0;
5428 }
5429
5430 /* ------------------------------------------------------------------------ */
5431
5432 /* PART 5: Initialize target hook structure and definitions. */
5433 \f
5434 /* Controlling the Compilation Driver. */
5435
5436 \f
5437 /* Run-time Target Specification. */
5438
5439 \f
5440 /* Defining Data Structures for Per-function Information. */
5441
5442 \f
5443 /* Storage Layout. */
5444
5445 #undef TARGET_PROMOTE_FUNCTION_MODE
5446 #define TARGET_PROMOTE_FUNCTION_MODE \
5447 default_promote_function_mode_always_promote
5448
5449 \f
5450 /* Layout of Source Language Data Types. */
5451
5452 \f
5453 /* Register Usage. */
5454
5455 /* -- Basic Characteristics of Registers. */
5456
5457 /* -- Order of Allocation of Registers. */
5458
5459 /* -- How Values Fit in Registers. */
5460
5461 /* -- Handling Leaf Functions. */
5462
5463 /* -- Registers That Form a Stack. */
5464
5465 \f
5466 /* Register Classes. */
5467
5468 #undef TARGET_CLASS_MAX_NREGS
5469 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
5470
5471 #undef TARGET_LRA_P
5472 #define TARGET_LRA_P hook_bool_void_true
5473
5474 #undef TARGET_REGISTER_PRIORITY
5475 #define TARGET_REGISTER_PRIORITY nds32_register_priority
5476
5477 \f
5478 /* Obsolete Macros for Defining Constraints. */
5479
5480 \f
5481 /* Stack Layout and Calling Conventions. */
5482
5483 /* -- Basic Stack Layout. */
5484
5485 /* -- Exception Handling Support. */
5486
5487 /* -- Specifying How Stack Checking is Done. */
5488
5489 /* -- Registers That Address the Stack Frame. */
5490
5491 /* -- Eliminating Frame Pointer and Arg Pointer. */
5492
5493 #undef TARGET_CAN_ELIMINATE
5494 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
5495
5496 /* -- Passing Function Arguments on the Stack. */
5497
5498 /* -- Passing Arguments in Registers. */
5499
5500 #undef TARGET_FUNCTION_ARG
5501 #define TARGET_FUNCTION_ARG nds32_function_arg
5502
5503 #undef TARGET_FUNCTION_ARG_ADVANCE
5504 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
5505
5506 #undef TARGET_FUNCTION_ARG_BOUNDARY
5507 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
5508
5509 /* -- How Scalar Function Values Are Returned. */
5510
5511 #undef TARGET_FUNCTION_VALUE
5512 #define TARGET_FUNCTION_VALUE nds32_function_value
5513
5514 #undef TARGET_LIBCALL_VALUE
5515 #define TARGET_LIBCALL_VALUE nds32_libcall_value
5516
5517 #undef TARGET_FUNCTION_VALUE_REGNO_P
5518 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
5519
5520 /* -- How Large Values Are Returned. */
5521
5522 /* -- Caller-Saves Register Allocation. */
5523
5524 /* -- Function Entry and Exit. */
5525
5526 #undef TARGET_ASM_FUNCTION_PROLOGUE
5527 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
5528
5529 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
5530 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
5531
5532 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
5533 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
5534
5535 #undef TARGET_ASM_FUNCTION_EPILOGUE
5536 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
5537
5538 #undef TARGET_ASM_OUTPUT_MI_THUNK
5539 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
5540
5541 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
5542 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
5543
5544 /* -- Generating Code for Profiling. */
5545
5546 /* -- Permitting tail calls. */
5547
5548 #undef TARGET_WARN_FUNC_RETURN
5549 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
5550
5551 /* Stack smashing protection. */
5552
5553 \f
5554 /* Implementing the Varargs Macros. */
5555
5556 #undef TARGET_STRICT_ARGUMENT_NAMING
5557 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
5558
5559 \f
5560 /* Trampolines for Nested Functions. */
5561
5562 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
5563 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
5564
5565 #undef TARGET_TRAMPOLINE_INIT
5566 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
5567
5568 \f
5569 /* Implicit Calls to Library Routines. */
5570
5571 \f
5572 /* Addressing Modes. */
5573
5574 #undef TARGET_LEGITIMATE_ADDRESS_P
5575 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
5576
5577 \f
5578 /* Anchored Addresses. */
5579
5580 \f
5581 /* Condition Code Status. */
5582
5583 /* -- Representation of condition codes using (cc0). */
5584
5585 /* -- Representation of condition codes using registers. */
5586
5587 /* -- Macros to control conditional execution. */
5588
5589 \f
5590 /* Describing Relative Costs of Operations. */
5591
5592 #undef TARGET_REGISTER_MOVE_COST
5593 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
5594
5595 #undef TARGET_MEMORY_MOVE_COST
5596 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
5597
5598 #undef TARGET_RTX_COSTS
5599 #define TARGET_RTX_COSTS nds32_rtx_costs
5600
5601 #undef TARGET_ADDRESS_COST
5602 #define TARGET_ADDRESS_COST nds32_address_cost
5603
5604 \f
5605 /* Adjusting the Instruction Scheduler. */
5606
5607 \f
5608 /* Dividing the Output into Sections (Texts, Data, . . . ). */
5609
5610 \f
5611 /* Position Independent Code. */
5612
5613 \f
5614 /* Defining the Output Assembler Language. */
5615
5616 /* -- The Overall Framework of an Assembler File. */
5617
5618 #undef TARGET_ASM_FILE_START
5619 #define TARGET_ASM_FILE_START nds32_asm_file_start
5620 #undef TARGET_ASM_FILE_END
5621 #define TARGET_ASM_FILE_END nds32_asm_file_end
5622
5623 /* -- Output of Data. */
5624
5625 #undef TARGET_ASM_ALIGNED_HI_OP
5626 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
5627
5628 #undef TARGET_ASM_ALIGNED_SI_OP
5629 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
5630
5631 /* -- Output of Uninitialized Variables. */
5632
5633 /* -- Output and Generation of Labels. */
5634
5635 #undef TARGET_ASM_GLOBALIZE_LABEL
5636 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
5637
5638 /* -- How Initialization Functions Are Handled. */
5639
5640 /* -- Macros Controlling Initialization Routines. */
5641
5642 /* -- Output of Assembler Instructions. */
5643
5644 #undef TARGET_PRINT_OPERAND
5645 #define TARGET_PRINT_OPERAND nds32_print_operand
5646 #undef TARGET_PRINT_OPERAND_ADDRESS
5647 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
5648
5649 /* -- Output of Dispatch Tables. */
5650
5651 /* -- Assembler Commands for Exception Regions. */
5652
5653 /* -- Assembler Commands for Alignment. */
5654
5655 \f
5656 /* Controlling Debugging Information Format. */
5657
5658 /* -- Macros Affecting All Debugging Formats. */
5659
5660 /* -- Specific Options for DBX Output. */
5661
5662 /* -- Open-Ended Hooks for DBX Format. */
5663
5664 /* -- File Names in DBX Format. */
5665
5666 /* -- Macros for SDB and DWARF Output. */
5667
5668 /* -- Macros for VMS Debug Format. */
5669
5670 \f
5671 /* Cross Compilation and Floating Point. */
5672
5673 \f
5674 /* Mode Switching Instructions. */
5675
5676 \f
5677 /* Defining target-specific uses of __attribute__. */
5678
5679 #undef TARGET_ATTRIBUTE_TABLE
5680 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
5681
5682 #undef TARGET_MERGE_DECL_ATTRIBUTES
5683 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
5684
5685 #undef TARGET_INSERT_ATTRIBUTES
5686 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
5687
5688 #undef TARGET_OPTION_PRAGMA_PARSE
5689 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
5690
5691 #undef TARGET_OPTION_OVERRIDE
5692 #define TARGET_OPTION_OVERRIDE nds32_option_override
5693
5694 \f
5695 /* Emulating TLS. */
5696
5697 \f
5698 /* Defining coprocessor specifics for MIPS targets. */
5699
5700 \f
5701 /* Parameters for Precompiled Header Validity Checking. */
5702
5703 \f
5704 /* C++ ABI parameters. */
5705
5706 \f
5707 /* Adding support for named address spaces. */
5708
5709 \f
5710 /* Miscellaneous Parameters. */
5711
5712 #undef TARGET_INIT_BUILTINS
5713 #define TARGET_INIT_BUILTINS nds32_init_builtins
5714
5715 #undef TARGET_EXPAND_BUILTIN
5716 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
5717
5718 \f
5719 /* ------------------------------------------------------------------------ */
5720
/* Initialize the GCC target structure.  TARGET_INITIALIZER picks up
   every TARGET_xxx hook macro defined above (and the defaults for the
   rest) to build the nds32 backend's targetm vector.  */

struct gcc_target targetm = TARGET_INITIALIZER;
5724
5725 /* ------------------------------------------------------------------------ */