1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2014 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "varasm.h"
30 #include "calls.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h" /* Required by recog.h. */
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h" /* For DFA state_t. */
38 #include "insn-codes.h" /* For CODE_FOR_xxx. */
39 #include "reload.h" /* For push_reload(). */
40 #include "flags.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "recog.h"
44 #include "diagnostic-core.h"
45 #include "df.h"
46 #include "tm_p.h"
47 #include "tm-constrs.h"
48 #include "optabs.h" /* For GEN_FCN. */
49 #include "target.h"
50 #include "target-def.h"
51 #include "langhooks.h" /* For add_builtin_function(). */
52 #include "ggc.h"
53 #include "builtins.h"
54
55 /* ------------------------------------------------------------------------ */
56
57 /* This file is divided into five parts:
58
59 PART 1: Auxiliary static variable definitions and
60 target hook static variable definitions.
61
62 PART 2: Auxiliary static function definitions.
63
64 PART 3: Implement target hook stuff definitions.
65
66      PART 4: Implement extern function definitions,
67              whose prototypes are in nds32-protos.h.
68
69 PART 5: Initialize target hook structure and definitions. */
70
71 /* ------------------------------------------------------------------------ */
72
73 /* PART 1: Auxiliary static variable definitions and
74 target hook static variable definitions. */
75
76 /* Define intrinsic register names.
77    Please refer to the nds32_intrinsic.h file; the index corresponds to the
78    'enum nds32_intrinsic_registers' data type values.
79    NOTE that the base value starts from 1024.  */
80 static const char * const nds32_intrinsic_register_names[] =
81 {
82 "$PSW", "$IPSW", "$ITYPE", "$IPC"
83 };
84
85 /* Defining target-specific uses of __attribute__. */
86 static const struct attribute_spec nds32_attribute_table[] =
87 {
88 /* Syntax: { name, min_len, max_len, decl_required, type_required,
89 function_type_required, handler, affects_type_identity } */
90
91   /* The interrupt vid: [0-63]+ (the actual vector number ranges from 9 to 72).  */
92   { "interrupt",    1, 64, false, false, false, NULL, false },
93   /* The exception vid: [1-8]+  (the actual vector number ranges from 1 to 8).  */
94 { "exception", 1, 8, false, false, false, NULL, false },
95 /* Argument is user's interrupt numbers. The vector number is always 0. */
96 { "reset", 1, 1, false, false, false, NULL, false },
97
98 /* The attributes describing isr nested type. */
99 { "nested", 0, 0, false, false, false, NULL, false },
100 { "not_nested", 0, 0, false, false, false, NULL, false },
101 { "nested_ready", 0, 0, false, false, false, NULL, false },
102
103 /* The attributes describing isr register save scheme. */
104 { "save_all", 0, 0, false, false, false, NULL, false },
105 { "partial_save", 0, 0, false, false, false, NULL, false },
106
107 /* The attributes used by reset attribute. */
108 { "nmi", 1, 1, false, false, false, NULL, false },
109 { "warm", 1, 1, false, false, false, NULL, false },
110
111 /* The attribute telling no prologue/epilogue. */
112 { "naked", 0, 0, false, false, false, NULL, false },
113
114 /* The last attribute spec is set to be NULL. */
115 { NULL, 0, 0, false, false, false, NULL, false }
116 };
117
118
119 /* ------------------------------------------------------------------------ */
120
121 /* PART 2: Auxiliary static function definitions. */
122
123 /* Function to save and restore machine-specific function data. */
124 static struct machine_function *
125 nds32_init_machine_status (void)
126 {
127 struct machine_function *machine;
128 machine = ggc_cleared_alloc<machine_function> ();
129
130 /* Initially assume this function needs prologue/epilogue. */
131 machine->naked_p = 0;
132
133 /* Initially assume this function does NOT use fp_as_gp optimization. */
134 machine->fp_as_gp_p = 0;
135
136 return machine;
137 }
138
139 /* Function to compute the stack frame size and
140    store it into the cfun->machine structure.  */
141 static void
142 nds32_compute_stack_frame (void)
143 {
144 int r;
145 int block_size;
146
147   /* Because nds32_compute_stack_frame() will be called from different places,
148      every time we enter this function we have to assume that it
149      needs a prologue/epilogue.  */
150 cfun->machine->naked_p = 0;
151
152   /* Get the variadic arguments size to prepare pretend arguments, which are
153      pushed onto the stack in the prologue.
154      Currently, we do not push variadic arguments ourselves;
155      we let GCC handle all the work.
156      The caller will push all corresponding nameless arguments onto the stack,
157      and the callee is able to retrieve them without problems.
158      These variables are still preserved in case one day
159      we would like the caller to pass arguments in registers.  */
160 cfun->machine->va_args_size = 0;
161 cfun->machine->va_args_first_regno = SP_REGNUM;
162 cfun->machine->va_args_last_regno = SP_REGNUM;
163
164   /* Get the size of local variables, incoming variables, and temporary variables.
165      Note that we need to make sure it is 8-byte aligned because
166      there may be no padding bytes if we are using LRA.  */
167 cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
168
169 /* Get outgoing arguments size. */
170 cfun->machine->out_args_size = crtl->outgoing_args_size;
171
172 /* If $fp value is required to be saved on stack, it needs 4 bytes space.
173 Check whether $fp is ever live. */
174 cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;
175
176 /* If $gp value is required to be saved on stack, it needs 4 bytes space.
177      Check whether we are using PIC code generation.  */
178 cfun->machine->gp_size = (flag_pic) ? 4 : 0;
179
180 /* If $lp value is required to be saved on stack, it needs 4 bytes space.
181 Check whether $lp is ever live. */
182 cfun->machine->lp_size = (df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;
183
184 /* Initially there is no padding bytes. */
185 cfun->machine->callee_saved_area_padding_bytes = 0;
186
187 /* Calculate the bytes of saving callee-saved registers on stack. */
188 cfun->machine->callee_saved_regs_size = 0;
189 cfun->machine->callee_saved_regs_first_regno = SP_REGNUM;
190 cfun->machine->callee_saved_regs_last_regno = SP_REGNUM;
191 /* Currently, there is no need to check $r28~$r31
192 because we will save them in another way. */
193 for (r = 0; r < 28; r++)
194 {
195 if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
196 {
197           /* Mark the first required callee-saved register
198              (we only need to set it once).
199              If first regno == SP_REGNUM, we can tell that
200              this is the first time we get here.  */
201 if (cfun->machine->callee_saved_regs_first_regno == SP_REGNUM)
202 cfun->machine->callee_saved_regs_first_regno = r;
203 /* Mark the last required callee-saved register. */
204 cfun->machine->callee_saved_regs_last_regno = r;
205 }
206 }
207
208   /* Check if this function can omit the prologue/epilogue code fragment.
209      If there is a 'naked' attribute on this function,
210      we can set the 'naked_p' flag to indicate that
211      we do not have to generate a prologue/epilogue.
212      Or, if all the following conditions hold,
213      we can set this function's 'naked_p' as well:
214 condition 1: first_regno == last_regno == SP_REGNUM,
215 which means we do not have to save
216 any callee-saved registers.
217 condition 2: Both $lp and $fp are NOT live in this function,
218 which means we do not need to save them.
219 condition 3: There is no local_size, which means
220 we do not need to adjust $sp. */
221 if (lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
222 || (cfun->machine->callee_saved_regs_first_regno == SP_REGNUM
223 && cfun->machine->callee_saved_regs_last_regno == SP_REGNUM
224 && !df_regs_ever_live_p (FP_REGNUM)
225 && !df_regs_ever_live_p (LP_REGNUM)
226 && cfun->machine->local_size == 0))
227 {
228       /* Set this function's 'naked_p' so that
229          other functions can check this flag.  */
230 cfun->machine->naked_p = 1;
231
232 /* No need to save $fp, $gp, and $lp.
233          We should set these values to zero
234 so that nds32_initial_elimination_offset() can work properly. */
235 cfun->machine->fp_size = 0;
236 cfun->machine->gp_size = 0;
237 cfun->machine->lp_size = 0;
238
239 /* If stack usage computation is required,
240 we need to provide the static stack size. */
241 if (flag_stack_usage_info)
242 current_function_static_stack_size = 0;
243
244 /* No need to do following adjustment, return immediately. */
245 return;
246 }
247
248 /* Adjustment for v3push instructions:
249 If we are using v3push (push25/pop25) instructions,
250      we need to make sure Rb is $r6 and Re is
251      one of $r6, $r8, $r10, or $r14.
252 Some results above will be discarded and recomputed.
253 Note that it is only available under V3/V3M ISA. */
254 if (TARGET_V3PUSH)
255 {
256 /* Recompute:
257 cfun->machine->fp_size
258 cfun->machine->gp_size
259 cfun->machine->lp_size
260 cfun->machine->callee_saved_regs_first_regno
261 cfun->machine->callee_saved_regs_last_regno */
262
263 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
264 cfun->machine->fp_size = 4;
265 cfun->machine->gp_size = 4;
266 cfun->machine->lp_size = 4;
267
268 /* Remember to set Rb = $r6. */
269 cfun->machine->callee_saved_regs_first_regno = 6;
270
271 if (cfun->machine->callee_saved_regs_last_regno <= 6)
272 {
273 /* Re = $r6 */
274 cfun->machine->callee_saved_regs_last_regno = 6;
275 }
276 else if (cfun->machine->callee_saved_regs_last_regno <= 8)
277 {
278 /* Re = $r8 */
279 cfun->machine->callee_saved_regs_last_regno = 8;
280 }
281 else if (cfun->machine->callee_saved_regs_last_regno <= 10)
282 {
283 /* Re = $r10 */
284 cfun->machine->callee_saved_regs_last_regno = 10;
285 }
286 else if (cfun->machine->callee_saved_regs_last_regno <= 14)
287 {
288 /* Re = $r14 */
289 cfun->machine->callee_saved_regs_last_regno = 14;
290 }
291 else if (cfun->machine->callee_saved_regs_last_regno == SP_REGNUM)
292 {
293           /* If last_regno is SP_REGNUM, it was never changed,
294              so set Re = $r6.  */
295 cfun->machine->callee_saved_regs_last_regno = 6;
296 }
297 else
298 {
299 /* The program flow should not go here. */
300 gcc_unreachable ();
301 }
302 }
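  /* Illustrative example (values are hypothetical): if the scan above had
     found $r7 to be the last required callee-saved register, the adjustment
     here rounds Re up to $r8, so v3push saves $r6-$r8 together with $fp,
     $gp, and $lp even though $r8 itself is not strictly required.  */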
303
304 /* We have correctly set callee_saved_regs_first_regno
305 and callee_saved_regs_last_regno.
306 Initially, the callee_saved_regs_size is supposed to be 0.
307 As long as callee_saved_regs_last_regno is not SP_REGNUM,
308 we can update callee_saved_regs_size with new size. */
309 if (cfun->machine->callee_saved_regs_last_regno != SP_REGNUM)
310 {
311 /* Compute pushed size of callee-saved registers. */
312 cfun->machine->callee_saved_regs_size
313 = 4 * (cfun->machine->callee_saved_regs_last_regno
314 - cfun->machine->callee_saved_regs_first_regno
315 + 1);
316 }
317
318 /* Important: We need to make sure that
319 (va_args_size + fp_size + gp_size
320 + lp_size + callee_saved_regs_size)
321      is 8-byte aligned.
322 If it is not, calculate the padding bytes. */
323 block_size = cfun->machine->va_args_size
324 + cfun->machine->fp_size
325 + cfun->machine->gp_size
326 + cfun->machine->lp_size
327 + cfun->machine->callee_saved_regs_size;
328 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
329 {
330 cfun->machine->callee_saved_area_padding_bytes
331 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
332 }
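  /* A worked example with hypothetical sizes: fp_size = 4, lp_size = 4, and
     callee_saved_regs_size = 12 give block_size = 20, which is not
     double-word aligned; NDS32_ROUND_UP_DOUBLE_WORD (20) is 24, so 4 padding
     bytes are recorded in callee_saved_area_padding_bytes.  */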
333
334 /* If stack usage computation is required,
335 we need to provide the static stack size. */
336 if (flag_stack_usage_info)
337 {
338 current_function_static_stack_size
339 = NDS32_ROUND_UP_DOUBLE_WORD (block_size)
340 + cfun->machine->local_size
341 + cfun->machine->out_args_size;
342 }
343 }
344
345 /* Function to create a parallel rtx pattern
346    which represents the stack push-multiple behavior.
347    The overall concept is:
348      "push registers to memory",
349      "adjust stack pointer".  */
350 static rtx
351 nds32_gen_stack_push_multiple (rtx Rb, rtx Re,
352 rtx En4 ATTRIBUTE_UNUSED)
353 {
354 int regno;
355 int extra_count;
356 int num_use_regs;
357 int par_index;
358 int offset;
359
360 rtx reg;
361 rtx mem;
362 rtx push_rtx;
363 rtx adjust_sp_rtx;
364 rtx parallel_insn;
365
366 /* We need to provide a customized rtx which contains
367 necessary information for data analysis,
368 so we create a parallel rtx like this:
369 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
370 (reg:SI Rb))
371 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
372 (reg:SI Rb+1))
373 ...
374 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
375 (reg:SI Re))
376 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
377 (reg:SI FP_REGNUM))
378 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
379 (reg:SI GP_REGNUM))
380 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
381 (reg:SI LP_REGNUM))
382 (set (reg:SI SP_REGNUM)
383 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
384
385 /* Calculate the number of registers that will be pushed. */
386 extra_count = 0;
387 if (cfun->machine->fp_size)
388 extra_count++;
389 if (cfun->machine->gp_size)
390 extra_count++;
391 if (cfun->machine->lp_size)
392 extra_count++;
393 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
394 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
395 num_use_regs = extra_count;
396 else
397 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
398
399 /* In addition to used registers,
400 we need one more space for (set sp sp-x) rtx. */
401 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
402 rtvec_alloc (num_use_regs + 1));
403 par_index = 0;
404
405 /* Initialize offset and start to create push behavior. */
406 offset = -(num_use_regs * 4);
407
408 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
409 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
410 {
411 /* Rb and Re may be SP_REGNUM.
412 We need to break this loop immediately. */
413 if (regno == SP_REGNUM)
414 break;
415
416 reg = gen_rtx_REG (SImode, regno);
417 mem = gen_frame_mem (SImode, plus_constant (Pmode,
418 stack_pointer_rtx,
419 offset));
420 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
421 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
422 RTX_FRAME_RELATED_P (push_rtx) = 1;
423 offset = offset + 4;
424 par_index++;
425 }
426
427 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
428 if (cfun->machine->fp_size)
429 {
430 reg = gen_rtx_REG (SImode, FP_REGNUM);
431 mem = gen_frame_mem (SImode, plus_constant (Pmode,
432 stack_pointer_rtx,
433 offset));
434 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
435 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
436 RTX_FRAME_RELATED_P (push_rtx) = 1;
437 offset = offset + 4;
438 par_index++;
439 }
440 if (cfun->machine->gp_size)
441 {
442 reg = gen_rtx_REG (SImode, GP_REGNUM);
443 mem = gen_frame_mem (SImode, plus_constant (Pmode,
444 stack_pointer_rtx,
445 offset));
446 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
447 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
448 RTX_FRAME_RELATED_P (push_rtx) = 1;
449 offset = offset + 4;
450 par_index++;
451 }
452 if (cfun->machine->lp_size)
453 {
454 reg = gen_rtx_REG (SImode, LP_REGNUM);
455 mem = gen_frame_mem (SImode, plus_constant (Pmode,
456 stack_pointer_rtx,
457 offset));
458 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
459 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
460 RTX_FRAME_RELATED_P (push_rtx) = 1;
461 offset = offset + 4;
462 par_index++;
463 }
464
465 /* Create (set sp sp-x). */
466
467 /* We need to re-calculate the offset value again for adjustment. */
468 offset = -(num_use_regs * 4);
469 adjust_sp_rtx
470 = gen_rtx_SET (VOIDmode,
471 stack_pointer_rtx,
472 plus_constant (Pmode, stack_pointer_rtx, offset));
473 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
474 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
475
476 return parallel_insn;
477 }
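/* As a concrete (hypothetical) instance of the pattern built above: with
   Rb = $r6, Re = $r8 and all of $fp, $gp, and $lp to be saved, num_use_regs
   is 6, the first store uses offset -24, and the final element of the
   parallel rtx is (set sp (plus sp (const_int -24))).  */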
478
479 /* Function to create a parallel rtx pattern
480    which represents the stack pop-multiple behavior.
481    The overall concept is:
482      "pop registers from memory",
483      "adjust stack pointer".  */
484 static rtx
485 nds32_gen_stack_pop_multiple (rtx Rb, rtx Re,
486 rtx En4 ATTRIBUTE_UNUSED)
487 {
488 int regno;
489 int extra_count;
490 int num_use_regs;
491 int par_index;
492 int offset;
493
494 rtx reg;
495 rtx mem;
496 rtx pop_rtx;
497 rtx adjust_sp_rtx;
498 rtx parallel_insn;
499
500 /* We need to provide a customized rtx which contains
501 necessary information for data analysis,
502 so we create a parallel rtx like this:
503 (parallel [(set (reg:SI Rb)
504 (mem (reg:SI SP_REGNUM)))
505 (set (reg:SI Rb+1)
506 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
507 ...
508 (set (reg:SI Re)
509 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
510 (set (reg:SI FP_REGNUM)
511 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
512 (set (reg:SI GP_REGNUM)
513 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
514 (set (reg:SI LP_REGNUM)
515 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
516 (set (reg:SI SP_REGNUM)
517 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
518
519   /* Calculate the number of registers that will be popped.  */
520 extra_count = 0;
521 if (cfun->machine->fp_size)
522 extra_count++;
523 if (cfun->machine->gp_size)
524 extra_count++;
525 if (cfun->machine->lp_size)
526 extra_count++;
527 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
528 if (REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM)
529 num_use_regs = extra_count;
530 else
531 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + extra_count;
532
533 /* In addition to used registers,
534 we need one more space for (set sp sp+x) rtx. */
535 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
536 rtvec_alloc (num_use_regs + 1));
537 par_index = 0;
538
539 /* Initialize offset and start to create pop behavior. */
540 offset = 0;
541
542 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
543 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
544 {
545 /* Rb and Re may be SP_REGNUM.
546 We need to break this loop immediately. */
547 if (regno == SP_REGNUM)
548 break;
549
550 reg = gen_rtx_REG (SImode, regno);
551 mem = gen_frame_mem (SImode, plus_constant (Pmode,
552 stack_pointer_rtx,
553 offset));
554 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
555 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
556 RTX_FRAME_RELATED_P (pop_rtx) = 1;
557 offset = offset + 4;
558 par_index++;
559 }
560
561 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
562 if (cfun->machine->fp_size)
563 {
564 reg = gen_rtx_REG (SImode, FP_REGNUM);
565 mem = gen_frame_mem (SImode, plus_constant (Pmode,
566 stack_pointer_rtx,
567 offset));
568 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
569 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
570 RTX_FRAME_RELATED_P (pop_rtx) = 1;
571 offset = offset + 4;
572 par_index++;
573 }
574 if (cfun->machine->gp_size)
575 {
576 reg = gen_rtx_REG (SImode, GP_REGNUM);
577 mem = gen_frame_mem (SImode, plus_constant (Pmode,
578 stack_pointer_rtx,
579 offset));
580 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
581 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
582 RTX_FRAME_RELATED_P (pop_rtx) = 1;
583 offset = offset + 4;
584 par_index++;
585 }
586 if (cfun->machine->lp_size)
587 {
588 reg = gen_rtx_REG (SImode, LP_REGNUM);
589 mem = gen_frame_mem (SImode, plus_constant (Pmode,
590 stack_pointer_rtx,
591 offset));
592 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
593 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
594 RTX_FRAME_RELATED_P (pop_rtx) = 1;
595 offset = offset + 4;
596 par_index++;
597 }
598
599 /* Create (set sp sp+x). */
600
601 /* The offset value is already in place. No need to re-calculate it. */
602 adjust_sp_rtx
603 = gen_rtx_SET (VOIDmode,
604 stack_pointer_rtx,
605 plus_constant (Pmode, stack_pointer_rtx, offset));
606 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
607 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
608
609 return parallel_insn;
610 }
611
612 /* Function to create a parallel rtx pattern
613    which represents the stack v3push behavior.
614    The overall concept is:
615      "push registers to memory",
616      "adjust stack pointer".  */
617 static rtx
618 nds32_gen_stack_v3push (rtx Rb,
619 rtx Re,
620 rtx En4 ATTRIBUTE_UNUSED,
621 rtx imm8u)
622 {
623 int regno;
624 int num_use_regs;
625 int par_index;
626 int offset;
627
628 rtx reg;
629 rtx mem;
630 rtx push_rtx;
631 rtx adjust_sp_rtx;
632 rtx parallel_insn;
633
634 /* We need to provide a customized rtx which contains
635 necessary information for data analysis,
636 so we create a parallel rtx like this:
637 (parallel [
638 (set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
639 (reg:SI Rb))
640 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
641 (reg:SI Rb+1))
642 ...
643 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
644 (reg:SI Re))
645 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
646 (reg:SI FP_REGNUM))
647 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
648 (reg:SI GP_REGNUM))
649 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
650 (reg:SI LP_REGNUM))
651 (set (reg:SI SP_REGNUM)
652 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
653
654   /* Calculate the number of registers that will be pushed.
655      Since $fp, $gp, and $lp are always pushed with the v3push instruction,
656      we need to count these three registers.
657      Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
658      So there is no need to worry about the Rb=Re=SP_REGNUM case.  */
659 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
660
661 /* In addition to used registers,
662 we need one more space for (set sp sp-x-imm8u) rtx. */
663 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
664 rtvec_alloc (num_use_regs + 1));
665 par_index = 0;
666
667 /* Initialize offset and start to create push behavior. */
668 offset = -(num_use_regs * 4);
669
670 /* Create (set mem regX) from Rb, Rb+1 up to Re.
671 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
672 So there is no need to worry about Rb=Re=SP_REGNUM case. */
673 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
674 {
675 reg = gen_rtx_REG (SImode, regno);
676 mem = gen_frame_mem (SImode, plus_constant (Pmode,
677 stack_pointer_rtx,
678 offset));
679 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
680 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
681 RTX_FRAME_RELATED_P (push_rtx) = 1;
682 offset = offset + 4;
683 par_index++;
684 }
685
686 /* Create (set mem fp). */
687 reg = gen_rtx_REG (SImode, FP_REGNUM);
688 mem = gen_frame_mem (SImode, plus_constant (Pmode,
689 stack_pointer_rtx,
690 offset));
691 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
692 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
693 RTX_FRAME_RELATED_P (push_rtx) = 1;
694 offset = offset + 4;
695 par_index++;
696 /* Create (set mem gp). */
697 reg = gen_rtx_REG (SImode, GP_REGNUM);
698 mem = gen_frame_mem (SImode, plus_constant (Pmode,
699 stack_pointer_rtx,
700 offset));
701 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
702 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
703 RTX_FRAME_RELATED_P (push_rtx) = 1;
704 offset = offset + 4;
705 par_index++;
706 /* Create (set mem lp). */
707 reg = gen_rtx_REG (SImode, LP_REGNUM);
708 mem = gen_frame_mem (SImode, plus_constant (Pmode,
709 stack_pointer_rtx,
710 offset));
711 push_rtx = gen_rtx_SET (VOIDmode, mem, reg);
712 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
713 RTX_FRAME_RELATED_P (push_rtx) = 1;
714 offset = offset + 4;
715 par_index++;
716
717 /* Create (set sp sp-x-imm8u). */
718
719 /* We need to re-calculate the offset value again for adjustment. */
720 offset = -(num_use_regs * 4);
721 adjust_sp_rtx
722 = gen_rtx_SET (VOIDmode,
723 stack_pointer_rtx,
724 plus_constant (Pmode,
725 stack_pointer_rtx,
726 offset - INTVAL (imm8u)));
727 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
728 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
729
730 return parallel_insn;
731 }
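/* Hypothetical example for the rtx built above: with Rb = $r6 and Re = $r10,
   num_use_regs is 5 + 3 = 8, the stores cover offsets -32 through -4, and
   the stack pointer is finally decremented by 32 + INTVAL (imm8u).  */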
732
733 /* Function to create a parallel rtx pattern
734    which represents the stack v3pop behavior.
735    The overall concept is:
736      "pop registers from memory",
737      "adjust stack pointer".  */
738 static rtx
739 nds32_gen_stack_v3pop (rtx Rb,
740 rtx Re,
741 rtx En4 ATTRIBUTE_UNUSED,
742 rtx imm8u)
743 {
744 int regno;
745 int num_use_regs;
746 int par_index;
747 int offset;
748
749 rtx reg;
750 rtx mem;
751 rtx pop_rtx;
752 rtx adjust_sp_rtx;
753 rtx parallel_insn;
754
755 /* We need to provide a customized rtx which contains
756 necessary information for data analysis,
757 so we create a parallel rtx like this:
758 (parallel [(set (reg:SI Rb)
759 (mem (reg:SI SP_REGNUM)))
760 (set (reg:SI Rb+1)
761 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
762 ...
763 (set (reg:SI Re)
764 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
765 (set (reg:SI FP_REGNUM)
766 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
767 (set (reg:SI GP_REGNUM)
768 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
769 (set (reg:SI LP_REGNUM)
770 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
771 (set (reg:SI SP_REGNUM)
772 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
773
774   /* Calculate the number of registers that will be popped.
775      Since $fp, $gp, and $lp are always popped with the v3pop instruction,
776      we need to count these three registers.
777      Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
778      So there is no need to worry about the Rb=Re=SP_REGNUM case.  */
779 num_use_regs = REGNO (Re) - REGNO (Rb) + 1 + 3;
780
781 /* In addition to used registers,
782 we need one more space for (set sp sp+x+imm8u) rtx. */
783 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
784 rtvec_alloc (num_use_regs + 1));
785 par_index = 0;
786
787 /* Initialize offset and start to create pop behavior. */
788 offset = 0;
789
790 /* Create (set regX mem) from Rb, Rb+1 up to Re.
791 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
792 So there is no need to worry about Rb=Re=SP_REGNUM case. */
793 for (regno = REGNO (Rb); regno <= (int) REGNO (Re); regno++)
794 {
795 reg = gen_rtx_REG (SImode, regno);
796 mem = gen_frame_mem (SImode, plus_constant (Pmode,
797 stack_pointer_rtx,
798 offset));
799 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
800 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
801 RTX_FRAME_RELATED_P (pop_rtx) = 1;
802 offset = offset + 4;
803 par_index++;
804 }
805
806 /* Create (set fp mem). */
807 reg = gen_rtx_REG (SImode, FP_REGNUM);
808 mem = gen_frame_mem (SImode, plus_constant (Pmode,
809 stack_pointer_rtx,
810 offset));
811 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
812 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
813 RTX_FRAME_RELATED_P (pop_rtx) = 1;
814 offset = offset + 4;
815 par_index++;
816 /* Create (set gp mem). */
817 reg = gen_rtx_REG (SImode, GP_REGNUM);
818 mem = gen_frame_mem (SImode, plus_constant (Pmode,
819 stack_pointer_rtx,
820 offset));
821 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
822 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
823 RTX_FRAME_RELATED_P (pop_rtx) = 1;
824 offset = offset + 4;
825 par_index++;
826   /* Create (set lp mem).  */
827 reg = gen_rtx_REG (SImode, LP_REGNUM);
828 mem = gen_frame_mem (SImode, plus_constant (Pmode,
829 stack_pointer_rtx,
830 offset));
831 pop_rtx = gen_rtx_SET (VOIDmode, reg, mem);
832 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
833 RTX_FRAME_RELATED_P (pop_rtx) = 1;
834 offset = offset + 4;
835 par_index++;
836
837 /* Create (set sp sp+x+imm8u). */
838
839 /* The offset value is already in place. No need to re-calculate it. */
840 adjust_sp_rtx
841 = gen_rtx_SET (VOIDmode,
842 stack_pointer_rtx,
843 plus_constant (Pmode,
844 stack_pointer_rtx,
845 offset + INTVAL (imm8u)));
846 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
847 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
848
849 return parallel_insn;
850 }
851
852 /* Function that may create more instructions
853    when a large value is used for adjusting the stack pointer.
854
855    In the nds32 target, 'addi' can be used for stack pointer
856    adjustment in the prologue/epilogue stage.
857    However, sometimes there are so many local variables that
858    the adjustment value cannot fit in the 'addi' instruction.
859    One solution is to move the value into a register
860    and then use the 'add' instruction.
861    In practice, we use TA_REGNUM ($r15) to accomplish this purpose.
862    Also, we need to return zero for the sp adjustment so that
863    the prologue/epilogue knows there is no need to create an 'addi' instruction.  */
864 static int
865 nds32_force_addi_stack_int (int full_value)
866 {
867 int adjust_value;
868
869 rtx tmp_reg;
870 rtx sp_adjust_insn;
871
872 if (!satisfies_constraint_Is15 (GEN_INT (full_value)))
873 {
874       /* The value cannot fit in a single addi instruction.
875          Create more instructions to move the value into a register
876          and then add it to the stack pointer.  */
877
878 /* $r15 is going to be temporary register to hold the value. */
879 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
880
881 /* Create one more instruction to move value
882 into the temporary register. */
883 emit_move_insn (tmp_reg, GEN_INT (full_value));
884
885 /* Create new 'add' rtx. */
886 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
887 stack_pointer_rtx,
888 tmp_reg);
889 /* Emit rtx into insn list and receive its transformed insn rtx. */
890 sp_adjust_insn = emit_insn (sp_adjust_insn);
891
892       /* In the prologue, we need to tell GCC that this is a frame-related insn,
893          so that this instruction is considered when outputting debug information.
894          If full_value is NEGATIVE, it means this function
895          is invoked by expand_prologue.  */
896 if (full_value < 0)
897 {
898           /* Because (tmp_reg <- full_value) may be split into two
899              rtl patterns, we cannot set its RTX_FRAME_RELATED_P.
900              We need to construct another (sp <- sp + full_value)
901              and then insert it into sp_adjust_insn's reg note to
902              represent a frame-related expression.
903              GCC knows how to refer to it and output debug information.  */
904
905 rtx plus_rtx;
906 rtx set_rtx;
907
908 plus_rtx = plus_constant (Pmode, stack_pointer_rtx, full_value);
909 set_rtx = gen_rtx_SET (VOIDmode, stack_pointer_rtx, plus_rtx);
910 add_reg_note (sp_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);
911
912 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
913 }
914
915       /* We have used an alternative way to adjust the stack pointer value.
916 Return zero so that prologue/epilogue
917 will not generate other instructions. */
918 return 0;
919 }
920 else
921 {
922       /* The value fits in the addi instruction.
923          However, remember to make it a positive value
924          because we want to return the 'adjustment' result.  */
925 adjust_value = (full_value < 0) ? (-full_value) : (full_value);
926
927 return adjust_value;
928 }
929 }
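/* Usage sketch (hypothetical values, and assuming the Is15 constraint
   denotes a signed 15-bit immediate): a full_value of -20000 does not
   satisfy Is15, so it is loaded into $ta ($r15), added to $sp with 'add',
   and 0 is returned; a full_value of -64 satisfies Is15, so 64 is returned
   and the caller emits the 'addi' itself.  */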
930
931 /* Return true if MODE/TYPE need double word alignment. */
932 static bool
933 nds32_needs_double_word_align (enum machine_mode mode, const_tree type)
934 {
935 unsigned int align;
936
937 /* Pick up the alignment according to the mode or type. */
938 align = NDS32_MODE_TYPE_ALIGN (mode, type);
939
940 return (align > PARM_BOUNDARY);
941 }
942
943 /* Return true if FUNC is a naked function. */
944 static bool
945 nds32_naked_function_p (tree func)
946 {
947 tree t;
948
949 if (TREE_CODE (func) != FUNCTION_DECL)
950 abort ();
951
952 t = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
953
954 return (t != NULL_TREE);
955 }
956
957 /* Function that checks if 'X' is a valid address register.
958    The variable 'STRICT' is very important for deciding
959    which register numbers are acceptable.
960
961    STRICT : true
962      => We are in or after the reload pass.
963         The register number should be strictly limited to general registers.
964
965    STRICT : false
966      => Before the reload pass, we are free to use any register number.  */
967 static bool
968 nds32_address_register_rtx_p (rtx x, bool strict)
969 {
970 int regno;
971
972 if (GET_CODE (x) != REG)
973 return false;
974
975 regno = REGNO (x);
976
977 if (strict)
978 return REGNO_OK_FOR_BASE_P (regno);
979 else
980 return true;
981 }
982
983 /* Function that checks if 'INDEX' is valid to be an index rtx for an address.
984
985    OUTER_MODE : Machine mode of the outer address rtx.
986    INDEX      : Check if this rtx is valid to be an index for an address.
987    STRICT     : If it is true, we are in or after the reload pass.  */
988 static bool
989 nds32_legitimate_index_p (enum machine_mode outer_mode,
990 rtx index,
991 bool strict)
992 {
993 int regno;
994 rtx op0;
995 rtx op1;
996
997 switch (GET_CODE (index))
998 {
999 case REG:
1000 regno = REGNO (index);
1001 /* If we are in reload pass or after reload pass,
1002 we need to limit it to general register. */
1003 if (strict)
1004 return REGNO_OK_FOR_INDEX_P (regno);
1005 else
1006 return true;
1007
1008 case CONST_INT:
1009 /* The alignment of the integer value is determined by 'outer_mode'. */
1010 if (GET_MODE_SIZE (outer_mode) == 1)
1011 {
1012 /* Further check if the value is legal for the 'outer_mode'. */
1013 if (!satisfies_constraint_Is15 (index))
1014 return false;
1015
1016 /* Pass all test, the value is valid, return true. */
1017 return true;
1018 }
1019 if (GET_MODE_SIZE (outer_mode) == 2
1020 && NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
1021 {
1022 /* Further check if the value is legal for the 'outer_mode'. */
1023 if (!satisfies_constraint_Is16 (index))
1024 return false;
1025
1026 /* Pass all test, the value is valid, return true. */
1027 return true;
1028 }
1029 if (GET_MODE_SIZE (outer_mode) == 4
1030 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1031 {
1032 /* Further check if the value is legal for the 'outer_mode'. */
1033 if (!satisfies_constraint_Is17 (index))
1034 return false;
1035
1036 /* Pass all test, the value is valid, return true. */
1037 return true;
1038 }
1039 if (GET_MODE_SIZE (outer_mode) == 8
1040 && NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
1041 {
1042 /* Further check if the value is legal for the 'outer_mode'. */
1043 if (!satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
1044 SImode)))
1045 return false;
1046
1047 /* Pass all test, the value is valid, return true. */
1048 return true;
1049 }
1050
1051 return false;
1052
1053 case MULT:
1054 op0 = XEXP (index, 0);
1055 op1 = XEXP (index, 1);
1056
1057 if (REG_P (op0) && CONST_INT_P (op1))
1058 {
1059 int multiplier;
1060 multiplier = INTVAL (op1);
1061
1062 /* We only allow (mult reg const_int_1)
1063 or (mult reg const_int_2) or (mult reg const_int_4). */
1064 if (multiplier != 1 && multiplier != 2 && multiplier != 4)
1065 return false;
1066
1067 regno = REGNO (op0);
1068           /* Limit it to general registers if we are
1069              in or after the reload pass.  */
1070           if (strict)
1071 return REGNO_OK_FOR_INDEX_P (regno);
1072 else
1073 return true;
1074 }
1075
1076 return false;
1077
1078 case ASHIFT:
1079 op0 = XEXP (index, 0);
1080 op1 = XEXP (index, 1);
1081
1082 if (REG_P (op0) && CONST_INT_P (op1))
1083 {
1084 int sv;
1085           /* op1 is already the sv value to use for the left shift.  */
1086 sv = INTVAL (op1);
1087
1088 /* We only allow (ashift reg const_int_0)
1089 or (ashift reg const_int_1) or (ashift reg const_int_2). */
1090           if (sv != 0 && sv != 1 && sv != 2)
1091 return false;
1092
1093 regno = REGNO (op0);
1094           /* Limit it to general registers if we are
1095              in or after the reload pass.  */
1096           if (strict)
1097 return REGNO_OK_FOR_INDEX_P (regno);
1098 else
1099 return true;
1100 }
1101
1102 return false;
1103
1104 default:
1105 return false;
1106 }
1107 }
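/* Illustrative cases for the checks above (hypothetical operands): with
   outer_mode == SImode, (const_int 40) is accepted for a word access because
   it is word-aligned (and small enough to pass the Is17 check), whereas
   (const_int 42) is rejected because it is not word-aligned;
   (mult (reg) (const_int 4)) and (ashift (reg) (const_int 2)) are both
   accepted as scaled-index forms, subject to the register check.  */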
1108
1109 /* A helper function to return character based on byte size. */
1110 static char
1111 nds32_byte_to_size (int byte)
1112 {
1113 switch (byte)
1114 {
1115 case 4:
1116 return 'w';
1117 case 2:
1118 return 'h';
1119 case 1:
1120 return 'b';
1121 default:
1122 /* Normally it should not be here. */
1123 gcc_unreachable ();
1124 }
1125 }
1126
1127 /* A helper function to check if this function should contain prologue. */
1128 static int
1129 nds32_have_prologue_p (void)
1130 {
1131 int i;
1132
1133 for (i = 0; i < 28; i++)
1134 if (NDS32_REQUIRED_CALLEE_SAVED_P (i))
1135 return 1;
1136
1137 return (flag_pic
1138 || NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
1139 || NDS32_REQUIRED_CALLEE_SAVED_P (LP_REGNUM));
1140 }
1141
1142 /* ------------------------------------------------------------------------ */
1143
1144 /* PART 3: Implement target hook stuff definitions. */
1145 \f
1146 /* Register Classes. */
1147
1148 static unsigned char
1149 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1150 enum machine_mode mode)
1151 {
1152 /* Return the maximum number of consecutive registers
1153 needed to represent "mode" in a register of "rclass". */
1154 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1155 }
1156
1157 static int
1158 nds32_register_priority (int hard_regno)
1159 {
1160   /* Encourage LRA to use r0-r7 when optimizing for size.  */
1161 if (optimize_size && hard_regno < 8)
1162 return 4;
1163 return 3;
1164 }
1165
1166 \f
1167 /* Stack Layout and Calling Conventions. */
1168
1169 /* There are three kinds of pointer concepts used in the GCC compiler:
1170
1171 frame pointer: A pointer to the first location of local variables.
1172 stack pointer: A pointer to the top of a stack frame.
1173 argument pointer: A pointer to the incoming arguments.
1174
1175    In the nds32 target calling convention, we are using 8-byte alignment.
1176    Besides, we would like each stack frame of a function to include:
1177
1178 [Block A]
1179 1. previous hard frame pointer
1180 2. return address
1181 3. callee-saved registers
1182      4. <padding bytes> (we will calculate this in nds32_compute_stack_frame()
1183 and save it at
1184 cfun->machine->callee_saved_area_padding_bytes)
1185
1186 [Block B]
1187 1. local variables
1188 2. spilling location
1189 3. <padding bytes> (it will be calculated by GCC itself)
1190 4. incoming arguments
1191 5. <padding bytes> (it will be calculated by GCC itself)
1192
1193 [Block C]
1194 1. <padding bytes> (it will be calculated by GCC itself)
1195 2. outgoing arguments
1196
1197 We 'wrap' these blocks together with
1198 hard frame pointer ($r28) and stack pointer ($r31).
1199 By applying the basic frame/stack/argument pointers concept,
1200    the layout of a stack frame should be like this:
1201
1202 | |
1203 old stack pointer -> ----
1204 | | \
1205 | | saved arguments for
1206 | | vararg functions
1207 | | /
1208 hard frame pointer -> --
1209 & argument pointer | | \
1210 | | previous hardware frame pointer
1211 | | return address
1212 | | callee-saved registers
1213 | | /
1214 frame pointer -> --
1215 | | \
1216 | | local variables
1217 | | and incoming arguments
1218 | | /
1219 --
1220 | | \
1221 | | outgoing
1222 | | arguments
1223 | | /
1224 stack pointer -> ----
1225
1226   $SFP and $AP are used to represent the frame pointer and argument pointer,
1227   both of which will be eliminated to the hard frame pointer.  */
1228
1229 /* -- Eliminating Frame Pointer and Arg Pointer. */
1230
1231 static bool nds32_can_eliminate (const int from_reg, const int to_reg)
1232 {
1233 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1234 return true;
1235
1236 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1237 return true;
1238
1239 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1240 return true;
1241
1242 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1243 return true;
1244
1245 return false;
1246 }
1247
1248 /* -- Passing Arguments in Registers. */
1249
1250 static rtx
1251 nds32_function_arg (cumulative_args_t ca, enum machine_mode mode,
1252 const_tree type, bool named)
1253 {
1254 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1255
1256 /* The last time this hook is called,
1257 it is called with MODE == VOIDmode. */
1258 if (mode == VOIDmode)
1259 return NULL_RTX;
1260
1261   /* Nameless arguments are passed on the stack.  */
1262 if (!named)
1263 return NULL_RTX;
1264
1265   /* If there is still a register available, return it.  */
1266 if (NDS32_ARG_PASS_IN_REG_P (cum->reg_offset, mode, type))
1267 {
1268 /* Pick up the next available register number. */
1269 unsigned int regno;
1270
1271 regno = NDS32_AVAILABLE_REGNUM_FOR_ARG (cum->reg_offset, mode, type);
1272 return gen_rtx_REG (mode, regno);
1273 }
1274 else
1275 {
1276       /* No register is available; return NULL_RTX.
1277          The compiler will use the stack to pass the argument instead.  */
1278 return NULL_RTX;
1279 }
1280 }
1281
1282 static void
1283 nds32_function_arg_advance (cumulative_args_t ca, enum machine_mode mode,
1284 const_tree type, bool named)
1285 {
1286 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
1287
1288   /* Advance to the next register for use.
1289      Only named arguments can be advanced.  */
1290 if (named)
1291 {
1292 cum->reg_offset
1293 = NDS32_AVAILABLE_REGNUM_FOR_ARG (cum->reg_offset, mode, type)
1294 - NDS32_GPR_ARG_FIRST_REGNUM
1295 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
1296 }
1297 }
1298
1299 static unsigned int
1300 nds32_function_arg_boundary (enum machine_mode mode, const_tree type)
1301 {
1302 return (nds32_needs_double_word_align (mode, type)
1303 ? NDS32_DOUBLE_WORD_ALIGNMENT
1304 : PARM_BOUNDARY);
1305 }
1306
1307 /* -- How Scalar Function Values Are Returned. */
1308
1309 static rtx
1310 nds32_function_value (const_tree ret_type,
1311 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1312 bool outgoing ATTRIBUTE_UNUSED)
1313 {
1314 enum machine_mode mode;
1315 int unsignedp;
1316
1317 mode = TYPE_MODE (ret_type);
1318 unsignedp = TYPE_UNSIGNED (ret_type);
1319
1320 mode = promote_mode (ret_type, mode, &unsignedp);
1321
1322 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1323 }
1324
1325 static rtx
1326 nds32_libcall_value (enum machine_mode mode,
1327 const_rtx fun ATTRIBUTE_UNUSED)
1328 {
1329 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
1330 }
1331
1332 static bool
1333 nds32_function_value_regno_p (const unsigned int regno)
1334 {
1335 return (regno == NDS32_GPR_RET_FIRST_REGNUM);
1336 }
1337
1338 /* -- Function Entry and Exit. */
1339
1340 /* The content produced from this function
1341 will be placed before prologue body. */
1342 static void
1343 nds32_asm_function_prologue (FILE *file,
1344 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
1345 {
1346 int r;
1347 const char *func_name;
1348 tree attrs;
1349 tree name;
1350
1351 /* All stack frame information is supposed to be
1352 already computed when expanding prologue.
1353 The result is in cfun->machine.
1354 DO NOT call nds32_compute_stack_frame() here
1355 because it may corrupt the essential information. */
1356
1357 fprintf (file, "\t! BEGIN PROLOGUE\n");
1358 fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
1359 fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
1360 fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
1361 fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);
1362
1363 /* Use df_regs_ever_live_p() to detect if the register
1364 is ever used in the current function. */
1365 fprintf (file, "\t! registers ever_live: ");
1366 for (r = 0; r < 32; r++)
1367 {
1368 if (df_regs_ever_live_p (r))
1369 fprintf (file, "%s, ", reg_names[r]);
1370 }
1371 fputc ('\n', file);
1372
1373 /* Display the attributes of this function. */
1374 fprintf (file, "\t! function attributes: ");
1375   /* Get the attributes tree list.
1376      Note that GCC builds the attribute list in reverse order.  */
1377 attrs = DECL_ATTRIBUTES (current_function_decl);
1378
1379   /* If there is no attribute at all, print out "None".  */
1380 if (!attrs)
1381 fprintf (file, "None");
1382
1383   /* If there are some attributes, check whether we need to
1384      construct isr vector information.  */
1385 func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
1386 nds32_construct_isr_vectors_information (attrs, func_name);
1387
1388 /* Display all attributes of this function. */
1389 while (attrs)
1390 {
1391 name = TREE_PURPOSE (attrs);
1392 fprintf (file, "%s ", IDENTIFIER_POINTER (name));
1393
1394 /* Pick up the next attribute. */
1395 attrs = TREE_CHAIN (attrs);
1396 }
1397 fputc ('\n', file);
1398 }
1399
1400 /* After rtl prologue has been expanded, this function is used. */
1401 static void
1402 nds32_asm_function_end_prologue (FILE *file)
1403 {
1404 fprintf (file, "\t! END PROLOGUE\n");
1405
1406   /* If the frame pointer is NOT needed and -mfp-as-gp is issued,
1407      we can generate the special directive ".omit_fp_begin"
1408      to guide the linker in doing the fp-as-gp optimization.
1409      However, a naked function should not have
1410      a prologue/epilogue; in that case
1411      using fp-as-gp would still require saving $fp by push/pop behavior and
1412      there is no benefit to using fp-as-gp on such a small function.
1413      So we need to make sure this function is NOT naked as well.  */
1414 if (!frame_pointer_needed
1415 && !cfun->machine->naked_p
1416 && cfun->machine->fp_as_gp_p)
1417 {
1418 fprintf (file, "\t! ----------------------------------------\n");
1419 fprintf (file, "\t! Guide linker to do "
1420 "link time optimization: fp-as-gp\n");
1421 fprintf (file, "\t! We add one more instruction to "
1422 "initialize $fp near to $gp location.\n");
1423 fprintf (file, "\t! If linker fails to use fp-as-gp transformation,\n");
1424 fprintf (file, "\t! this extra instruction should be "
1425 "eliminated at link stage.\n");
1426 fprintf (file, "\t.omit_fp_begin\n");
1427 fprintf (file, "\tla\t$fp,_FP_BASE_\n");
1428 fprintf (file, "\t! ----------------------------------------\n");
1429 }
1430 }
1431
1432 /* Before rtl epilogue has been expanded, this function is used. */
1433 static void
1434 nds32_asm_function_begin_epilogue (FILE *file)
1435 {
1436   /* If the frame pointer is NOT needed and -mfp-as-gp is issued,
1437      we can generate the special directive ".omit_fp_end"
1438      to claim the fp-as-gp optimization range.
1439      However, a naked function
1440      should not have a prologue/epilogue; in that case
1441      using fp-as-gp would still require saving $fp by push/pop behavior and
1442      there is no benefit to using fp-as-gp on such a small function.
1443      So we need to make sure this function is NOT naked as well.  */
1444 if (!frame_pointer_needed
1445 && !cfun->machine->naked_p
1446 && cfun->machine->fp_as_gp_p)
1447 {
1448 fprintf (file, "\t! ----------------------------------------\n");
1449 fprintf (file, "\t! Claim the range of fp-as-gp "
1450 "link time optimization\n");
1451 fprintf (file, "\t.omit_fp_end\n");
1452 fprintf (file, "\t! ----------------------------------------\n");
1453 }
1454
1455 fprintf (file, "\t! BEGIN EPILOGUE\n");
1456 }
1457
1458 /* The content produced from this function
1459 will be placed after epilogue body. */
1460 static void
1461 nds32_asm_function_epilogue (FILE *file,
1462 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
1463 {
1464 fprintf (file, "\t! END EPILOGUE\n");
1465 }
1466
1467 static void
1468 nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
1469 HOST_WIDE_INT delta,
1470 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
1471 tree function)
1472 {
1473 int this_regno;
1474
1475 /* Make sure unwind info is emitted for the thunk if needed. */
1476 final_start_function (emit_barrier (), file, 1);
1477
1478 this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
1479 ? 1
1480 : 0);
1481
1482 if (delta != 0)
1483 {
1484 if (satisfies_constraint_Is15 (GEN_INT (delta)))
1485 {
1486 fprintf (file, "\taddi\t$r%d, $r%d, %ld\n",
1487 this_regno, this_regno, delta);
1488 }
1489 else if (satisfies_constraint_Is20 (GEN_INT (delta)))
1490 {
1491 fprintf (file, "\tmovi\t$ta, %ld\n", delta);
1492 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1493 }
1494 else
1495 {
1496 fprintf (file, "\tsethi\t$ta, hi20(%ld)\n", delta);
1497 fprintf (file, "\tori\t$ta, $ta, lo12(%ld)\n", delta);
1498 fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
1499 }
1500 }
1501
1502 fprintf (file, "\tb\t");
1503 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
1504 fprintf (file, "\n");
1505
1506 final_end_function ();
1507 }
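/* Hypothetical output of the thunk above: when the 'this' pointer lives in
   $r0 and delta is 4, the first branch applies and the emitted code is
   simply "addi $r0, $r0, 4" followed by "b <target function>".  */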
1508
1509 /* -- Permitting tail calls. */
1510
1511 /* Determine whether we need to enable warning for function return check. */
1512 static bool
1513 nds32_warn_func_return (tree decl)
1514 {
1515 /* Naked functions are implemented entirely in assembly, including the
1516 return sequence, so suppress warnings about this. */
1517 return !nds32_naked_function_p (decl);
1518 }
1519
1520 \f
1521 /* Implementing the Varargs Macros. */
1522
1523 static bool
1524 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1525 {
1526   /* Return true so that all named arguments for FUNCTION_ARG have named=1.
1527      If we returned false, then for a variadic function all named arguments
1528      EXCEPT the last would be treated as named.  */
1529 return true;
1530 }
1531
1532 \f
1533 /* Trampolines for Nested Functions. */
1534
1535 static void
1536 nds32_asm_trampoline_template (FILE *f)
1537 {
1538 if (TARGET_REDUCED_REGS)
1539 {
1540 /* Trampoline is not supported on reduced-set registers yet. */
1541 sorry ("a nested function is not supported for reduced registers");
1542 }
1543 else
1544 {
1545 asm_fprintf (f, "\t! Trampoline code template\n");
1546 asm_fprintf (f, "\t! This code fragment will be copied "
1547 "into stack on demand\n");
1548
1549 asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
1550 asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
1551 "! load nested function address\n");
1552 asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
1553 "! load chain_value\n");
1554 asm_fprintf (f, "\tjr\t$r15\n");
1555 }
1556
1557 /* Preserve space ($pc + 16) for saving chain_value,
1558 nds32_trampoline_init will fill the value in this slot. */
1559 asm_fprintf (f, "\t! space for saving chain_value\n");
1560 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1561
1562 /* Preserve space ($pc + 20) for saving nested function address,
1563 nds32_trampoline_init will fill the value in this slot. */
1564 asm_fprintf (f, "\t! space for saving nested function address\n");
1565 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
1566 }
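/* Layout note on the template above: the two assemble_aligned_integer slots
   sit right after the code template, at the $pc + 16 and $pc + 20 offsets
   read by the "lwi ... [$r16 + 16]" and "lwi ... [$r16 + 20]" instructions;
   nds32_trampoline_init below fills them via adjust_address at byte offsets
   16 and 20.  */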
1567
1568 /* Emit RTL insns to initialize the variable parts of a trampoline. */
1569 static void
1570 nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
1571 {
1572 int i;
1573
1574 /* Nested function address. */
1575 rtx fnaddr;
1576 /* The memory rtx that is going to
1577 be filled with chain_value. */
1578 rtx chain_value_mem;
1579 /* The memory rtx that is going to
1580 be filled with nested function address. */
1581 rtx nested_func_mem;
1582
1583 /* Start address of trampoline code in stack, for doing cache sync. */
1584 rtx sync_cache_addr;
1585 /* Temporary register for sync instruction. */
1586 rtx tmp_reg;
1587 /* Instruction-cache sync instruction,
1588 requesting an argument as starting address. */
1589 rtx isync_insn;
1590   /* For convenience of doing the comparison.  */
1591 int tramp_align_in_bytes;
1592
1593 /* Trampoline is not supported on reduced-set registers yet. */
1594 if (TARGET_REDUCED_REGS)
1595 sorry ("a nested function is not supported for reduced registers");
1596
1597 /* STEP 1: Copy trampoline code template into stack,
1598 fill up essential data into stack. */
1599
1600 /* Extract nested function address rtx. */
1601 fnaddr = XEXP (DECL_RTL (fndecl), 0);
1602
1603 /* m_tramp is memory rtx that is going to be filled with trampoline code.
1604 We have nds32_asm_trampoline_template() to emit template pattern. */
1605 emit_block_move (m_tramp, assemble_trampoline_template (),
1606 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
1607
1608 /* After copying trampoline code into stack,
1609 fill chain_value into stack. */
1610 chain_value_mem = adjust_address (m_tramp, SImode, 16);
1611 emit_move_insn (chain_value_mem, chain_value);
1612   /* After copying trampoline code into stack,
1613      fill the nested function address into stack.  */
1614 nested_func_mem = adjust_address (m_tramp, SImode, 20);
1615 emit_move_insn (nested_func_mem, fnaddr);
1616
1617 /* STEP 2: Sync instruction-cache. */
1618
1619 /* We have successfully filled trampoline code into stack.
1620 However, in order to execute code in stack correctly,
1621 we must sync instruction cache. */
1622 sync_cache_addr = XEXP (m_tramp, 0);
1623 tmp_reg = gen_reg_rtx (SImode);
1624 isync_insn = gen_unspec_volatile_isync (tmp_reg);
1625
1626 /* Because nds32_cache_block_size is in bytes,
1627 we get trampoline alignment in bytes for convenient comparison. */
1628 tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;
1629
1630 if (tramp_align_in_bytes >= nds32_cache_block_size
1631 && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
1632 {
1633 /* Under this condition, the starting address of trampoline
1634 must be aligned to the starting address of each cache block
1635 and we do not have to worry about cross-boundary issue. */
1636 for (i = 0;
1637 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1638 / nds32_cache_block_size;
1639 i++)
1640 {
1641 emit_move_insn (tmp_reg,
1642 plus_constant (Pmode, sync_cache_addr,
1643 nds32_cache_block_size * i));
1644 emit_insn (isync_insn);
1645 }
1646 }
1647 else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
1648 {
1649       /* The starting address of the trampoline code
1650          may not be aligned to the cache block,
1651          so the trampoline code may span two cache blocks.
1652          We need to sync the last element of the trampoline
1653          template, which is 4 bytes in size.  */
1654 for (i = 0;
1655 i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
1656 / nds32_cache_block_size;
1657 i++)
1658 {
1659 emit_move_insn (tmp_reg,
1660 plus_constant (Pmode, sync_cache_addr,
1661 nds32_cache_block_size * i));
1662 emit_insn (isync_insn);
1663 }
1664
1665 /* The last element of trampoline template is 4-byte size. */
1666 emit_move_insn (tmp_reg,
1667 plus_constant (Pmode, sync_cache_addr,
1668 TRAMPOLINE_SIZE - 4));
1669 emit_insn (isync_insn);
1670 }
1671 else
1672 {
1673 /* This is the simplest case.
1674 Because TRAMPOLINE_SIZE is less than or
1675 equal to nds32_cache_block_size,
1676 we can just sync start address and
1677 the last element of trampoline code. */
1678
1679       /* Sync the starting address of the trampoline code.  */
1680 emit_move_insn (tmp_reg, sync_cache_addr);
1681 emit_insn (isync_insn);
1682       /* Sync the last element of the trampoline
1683          template, which is 4 bytes in size.  */
1684 emit_move_insn (tmp_reg,
1685 plus_constant (Pmode, sync_cache_addr,
1686 TRAMPOLINE_SIZE - 4));
1687 emit_insn (isync_insn);
1688 }
1689
1690 /* Set instruction serialization barrier
1691 to guarantee the correct operations. */
1692 emit_insn (gen_unspec_volatile_isb ());
1693 }
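/* A hypothetical walk-through of the cache sync above, assuming a
   TRAMPOLINE_SIZE of 24 bytes, a TRAMPOLINE_ALIGNMENT smaller than the cache
   block, and an nds32_cache_block_size of 16: the second branch is taken,
   the loop issues isync for byte offsets 0 and 16, and one more isync covers
   the last 4-byte word at offset 20.  */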
1694
1695 \f
1696 /* Addressing Modes. */
1697
1698 static bool
1699 nds32_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1700 {
1701 /* For (mem:DI addr) or (mem:DF addr) case,
1702 we only allow 'addr' to be [reg], [symbol_ref],
1703 [const], or [reg + const_int] pattern. */
1704 if (mode == DImode || mode == DFmode)
1705 {
1706 /* Allow [Reg + const_int] addressing mode. */
1707 if (GET_CODE (x) == PLUS)
1708 {
1709 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
1710 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
1711 && CONST_INT_P (XEXP (x, 1)))
1712 return true;
1713
1714 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
1715 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
1716 && CONST_INT_P (XEXP (x, 0)))
1717 return true;
1718 }
1719
1720 /* Now check [reg], [symbol_ref], and [const]. */
1721 if (GET_CODE (x) != REG
1722 && GET_CODE (x) != SYMBOL_REF
1723 && GET_CODE (x) != CONST)
1724 return false;
1725 }
1726
1727 /* Check if 'x' is a valid address. */
1728 switch (GET_CODE (x))
1729 {
1730 case REG:
1731 /* (mem (reg A)) => [Ra] */
1732 return nds32_address_register_rtx_p (x, strict);
1733
1734 case SYMBOL_REF:
1735
1736 if (!TARGET_GP_DIRECT
1737 && (reload_completed
1738 || reload_in_progress
1739 || lra_in_progress))
1740 return false;
1741
1742 /* (mem (symbol_ref A)) => [symbol_ref] */
1743 return !currently_expanding_to_rtl;
1744
1745 case CONST:
1746
1747 if (!TARGET_GP_DIRECT
1748 && (reload_completed
1749 || reload_in_progress
1750 || lra_in_progress))
1751 return false;
1752
1753 /* (mem (const (...)))
1754 => [ + const_addr ], where const_addr = symbol_ref + const_int */
1755 if (GET_CODE (XEXP (x, 0)) == PLUS)
1756 {
1757 rtx plus_op = XEXP (x, 0);
1758
1759 rtx op0 = XEXP (plus_op, 0);
1760 rtx op1 = XEXP (plus_op, 1);
1761
1762 if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
1763 return true;
1764 else
1765 return false;
1766 }
1767
1768 return false;
1769
1770 case POST_MODIFY:
1771 /* (mem (post_modify (reg) (plus (reg) (reg))))
1772 => [Ra], Rb */
1773 /* (mem (post_modify (reg) (plus (reg) (const_int))))
1774 => [Ra], const_int */
1775 if (GET_CODE (XEXP (x, 0)) == REG
1776 && GET_CODE (XEXP (x, 1)) == PLUS)
1777 {
1778 rtx plus_op = XEXP (x, 1);
1779
1780 rtx op0 = XEXP (plus_op, 0);
1781 rtx op1 = XEXP (plus_op, 1);
1782
1783 if (nds32_address_register_rtx_p (op0, strict)
1784 && nds32_legitimate_index_p (mode, op1, strict))
1785 return true;
1786 else
1787 return false;
1788 }
1789
1790 return false;
1791
1792 case POST_INC:
1793 case POST_DEC:
1794 /* (mem (post_inc reg)) => [Ra], 1/2/4 */
1795 /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
1796 /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
1797 We only need to deal with register Ra. */
1798 if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
1799 return true;
1800 else
1801 return false;
1802
1803 case PLUS:
1804 /* (mem (plus reg const_int))
1805 => [Ra + imm] */
1806 /* (mem (plus reg reg))
1807 => [Ra + Rb] */
1808 /* (mem (plus (mult reg const_int) reg))
1809 => [Ra + Rb << sv] */
1810 if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
1811 && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
1812 return true;
1813 else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
1814 && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
1815 return true;
1816 else
1817 return false;
1818
1819 case LO_SUM:
1820 if (!TARGET_GP_DIRECT)
1821 return true;
1822
1823 default:
1824 return false;
1825 }
1826 }
1827
1828 \f
1829 /* Describing Relative Costs of Operations. */
1830
1831 static int nds32_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
1832 reg_class_t from,
1833 reg_class_t to)
1834 {
1835 if (from == HIGH_REGS || to == HIGH_REGS)
1836 return 6;
1837
1838 return 2;
1839 }
1840
1841 static int nds32_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
1842 reg_class_t rclass ATTRIBUTE_UNUSED,
1843 bool in ATTRIBUTE_UNUSED)
1844 {
1845 return 8;
1846 }
1847
1848 /* This target hook describes the relative costs of RTL expressions.
1849 Return 'true' when all subexpressions of x have been processed.
1850 Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
1851 Refer to gcc/rtlanal.c for more information. */
1852 static bool
1853 nds32_rtx_costs (rtx x,
1854 int code,
1855 int outer_code,
1856 int opno,
1857 int *total,
1858 bool speed)
1859 {
1860 return nds32_rtx_costs_impl (x, code, outer_code, opno, total, speed);
1861 }
1862
1863 static int nds32_address_cost (rtx address,
1864 enum machine_mode mode,
1865 addr_space_t as,
1866 bool speed)
1867 {
1868 return nds32_address_cost_impl (address, mode, as, speed);
1869 }
1870
1871 \f
1872 /* Defining the Output Assembler Language. */
1873
1874 /* -- The Overall Framework of an Assembler File. */
1875
1876 static void
1877 nds32_asm_file_start (void)
1878 {
1879 default_file_start ();
1880
1881 /* Tell assembler which ABI we are using. */
1882 fprintf (asm_out_file, "\t! ABI version\n");
1883 fprintf (asm_out_file, "\t.abi_2\n");
1884
1885 /* Tell assembler that this asm code is generated by compiler. */
1886 fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
1887 fprintf (asm_out_file, "\t.flag\tverbatim\n");
1888 /* Give assembler the size of each vector for interrupt handler. */
1889 fprintf (asm_out_file, "\t! This vector size directive is required "
1890 "for checking inconsistency on interrupt handler\n");
1891 fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);
1892
1893 /* If the user enables '-mforce-fp-as-gp' or compiles programs with -Os,
1894 the compiler may produce the 'la $fp,_FP_BASE_' instruction
1895 in the prologue for fp-as-gp optimization.
1896 We should emit a weak reference to _FP_BASE_ to avoid an undefined reference
1897 in case the user does not pass the '--relax' option to the linker. */
1898 if (TARGET_FORCE_FP_AS_GP || optimize_size)
1899 {
1900 fprintf (asm_out_file, "\t! This weak reference is required to do "
1901 "fp-as-gp link time optimization\n");
1902 fprintf (asm_out_file, "\t.weak\t_FP_BASE_\n");
1903 }
1904 /* If user enables '-mex9', we should emit relaxation directive
1905 to tell linker that this file is allowed to do ex9 optimization. */
1906 if (TARGET_EX9)
1907 {
1908 fprintf (asm_out_file, "\t! This relaxation directive is required "
1909 "to do ex9 link time optimization\n");
1910 fprintf (asm_out_file, "\t.relax\tex9\n");
1911 }
1912
1913 fprintf (asm_out_file, "\t! ------------------------------------\n");
1914
1915 if (TARGET_ISA_V2)
1916 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
1917 if (TARGET_ISA_V3)
1918 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
1919 if (TARGET_ISA_V3M)
1920 fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");
1921
1922 fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
1923 ((TARGET_BIG_ENDIAN) ? "big-endian"
1924 : "little-endian"));
1925
1926 fprintf (asm_out_file, "\t! ------------------------------------\n");
1927
1928 fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
1929 ((TARGET_CMOV) ? "Yes"
1930 : "No"));
1931 fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
1932 ((TARGET_PERF_EXT) ? "Yes"
1933 : "No"));
1934
1935 fprintf (asm_out_file, "\t! ------------------------------------\n");
1936
1937 fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
1938 ((TARGET_V3PUSH) ? "Yes"
1939 : "No"));
1940 fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
1941 ((TARGET_16_BIT) ? "Yes"
1942 : "No"));
1943 fprintf (asm_out_file, "\t! GP base access\t: %s\n",
1944 ((TARGET_GP_DIRECT) ? "Yes"
1945 : "No"));
1946 fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
1947 ((TARGET_REDUCED_REGS) ? "Yes"
1948 : "No"));
1949
1950 fprintf (asm_out_file, "\t! ------------------------------------\n");
1951
1952 if (optimize_size)
1953 fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
1954 else
1955 fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);
1956
1957 fprintf (asm_out_file, "\t! ------------------------------------\n");
1958
1959 fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
1960 nds32_cache_block_size);
1961
1962 fprintf (asm_out_file, "\t! ------------------------------------\n");
1963
1964 nds32_asm_file_start_for_isr ();
1965 }
1966
1967 static void
1968 nds32_asm_file_end (void)
1969 {
1970 nds32_asm_file_end_for_isr ();
1971
1972 fprintf (asm_out_file, "\t! ------------------------------------\n");
1973 }
1974
1975 /* -- Output and Generation of Labels. */
1976
1977 static void
1978 nds32_asm_globalize_label (FILE *stream, const char *name)
1979 {
1980 fputs ("\t.global\t", stream);
1981 assemble_name (stream, name);
1982 fputs ("\n", stream);
1983 }
1984
1985 /* -- Output of Assembler Instructions. */
1986
1987 static void
1988 nds32_print_operand (FILE *stream, rtx x, int code)
1989 {
1990 int op_value;
1991
1992 switch (code)
1993 {
1994 case 0 :
1995 /* Do nothing special. */
1996 break;
1997
1998 case 'V':
1999 /* 'x' is supposed to be CONST_INT, get the value. */
2000 gcc_assert (CONST_INT_P (x));
2001 op_value = INTVAL (x);
2002
2003 /* According to the Andes architecture,
2004 the system/user register index range is 0 ~ 1023.
2005 In order to avoid conflict between user-specified-integer value
2006 and enum-specified-register value,
2007 the 'enum nds32_intrinsic_registers' value
2008 in nds32_intrinsic.h starts from 1024. */
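/* For example: an operand value of 3 is printed literally as "3",
   while a value of 1024 indexes nds32_intrinsic_register_names[0]
   and is printed as "$PSW". */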
2009 if (op_value < 1024 && op_value >= 0)
2010 {
2011 /* If user gives integer value directly (0~1023),
2012 we just print out the value. */
2013 fprintf (stream, "%d", op_value);
2014 }
2015 else if (op_value < 0
2016 || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
2017 + 1024))
2018 {
2019 /* The enum index value for array size is out of range. */
2020 error ("intrinsic register index is out of range");
2021 }
2022 else
2023 {
2024 /* If user applies normal way with __NDS32_REG_XXX__ enum data,
2025 we can print out the register name. Remember to subtract 1024. */
2026 fprintf (stream, "%s",
2027 nds32_intrinsic_register_names[op_value - 1024]);
2028 }
2029
2030 /* No need to handle following process, so return immediately. */
2031 return;
2032
2033 default :
2034 /* Unknown flag. */
2035 output_operand_lossage ("invalid operand output code");
2036 break;
2037 }
2038
2039 switch (GET_CODE (x))
2040 {
2041 case LABEL_REF:
2042 case SYMBOL_REF:
2043 output_addr_const (stream, x);
2044 break;
2045
2046 case REG:
2047 /* Forbid using static chain register ($r16)
2048 on reduced-set registers configuration. */
2049 if (TARGET_REDUCED_REGS
2050 && REGNO (x) == STATIC_CHAIN_REGNUM)
2051 sorry ("a nested function is not supported for reduced registers");
2052
2053 /* Normal cases, print out register name. */
2054 fputs (reg_names[REGNO (x)], stream);
2055 break;
2056
2057 case MEM:
2058 output_address (XEXP (x, 0));
2059 break;
2060
2061 case CODE_LABEL:
2062 case CONST_INT:
2063 case CONST:
2064 output_addr_const (stream, x);
2065 break;
2066
2067 default:
2068 /* Generally, output_addr_const () is able to handle most cases.
2069 We want to see what CODE could appear,
2070 so we use gcc_unreachable() to stop it. */
2071 debug_rtx (x);
2072 gcc_unreachable ();
2073 break;
2074 }
2075 }
2076
2077 static void
2078 nds32_print_operand_address (FILE *stream, rtx x)
2079 {
2080 rtx op0, op1;
2081
2082 switch (GET_CODE (x))
2083 {
2084 case SYMBOL_REF:
2085 case CONST:
2086 /* [ + symbol_ref] */
2087 /* [ + const_addr], where const_addr = symbol_ref + const_int */
2088 fputs ("[ + ", stream);
2089 output_addr_const (stream, x);
2090 fputs ("]", stream);
2091 break;
2092
2093 case REG:
2094 /* Forbid using static chain register ($r16)
2095 on reduced-set registers configuration. */
2096 if (TARGET_REDUCED_REGS
2097 && REGNO (x) == STATIC_CHAIN_REGNUM)
2098 sorry ("a nested function is not supported for reduced registers");
2099
2100 /* [Ra] */
2101 fprintf (stream, "[%s]", reg_names[REGNO (x)]);
2102 break;
2103
2104 case PLUS:
2105 op0 = XEXP (x, 0);
2106 op1 = XEXP (x, 1);
2107
2108 /* Checking op0, forbid using static chain register ($r16)
2109 on reduced-set registers configuration. */
2110 if (TARGET_REDUCED_REGS
2111 && REG_P (op0)
2112 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2113 sorry ("a nested function is not supported for reduced registers");
2114 /* Checking op1, forbid using static chain register ($r16)
2115 on reduced-set registers configuration. */
2116 if (TARGET_REDUCED_REGS
2117 && REG_P (op1)
2118 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2119 sorry ("a nested function is not supported for reduced registers");
2120
2121 if (REG_P (op0) && CONST_INT_P (op1))
2122 {
2123 /* [Ra + imm] */
2124 fprintf (stream, "[%s + (%d)]",
2125 reg_names[REGNO (op0)], (int)INTVAL (op1));
2126 }
2127 else if (REG_P (op0) && REG_P (op1))
2128 {
2129 /* [Ra + Rb] */
2130 fprintf (stream, "[%s + %s]",
2131 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2132 }
2133 else if (GET_CODE (op0) == MULT && REG_P (op1))
2134 {
2135 /* [Ra + Rb << sv]
2136 From observation, the pattern looks like:
2137 (plus:SI (mult:SI (reg:SI 58)
2138 (const_int 4 [0x4]))
2139 (reg/f:SI 57)) */
2140 int sv;
2141
2142 /* We need to set sv to output shift value. */
2143 if (INTVAL (XEXP (op0, 1)) == 1)
2144 sv = 0;
2145 else if (INTVAL (XEXP (op0, 1)) == 2)
2146 sv = 1;
2147 else if (INTVAL (XEXP (op0, 1)) == 4)
2148 sv = 2;
2149 else
2150 gcc_unreachable ();
2151
2152 fprintf (stream, "[%s + %s << %d]",
2153 reg_names[REGNO (op1)],
2154 reg_names[REGNO (XEXP (op0, 0))],
2155 sv);
2156 }
2157 else
2158 {
2159 /* The control flow is not supposed to be here. */
2160 debug_rtx (x);
2161 gcc_unreachable ();
2162 }
2163
2164 break;
2165
2166 case POST_MODIFY:
2167 /* (post_modify (regA) (plus (regA) (regB)))
2168 (post_modify (regA) (plus (regA) (const_int)))
2169 We would like to extract
2170 regA and regB (or const_int) from plus rtx. */
2171 op0 = XEXP (XEXP (x, 1), 0);
2172 op1 = XEXP (XEXP (x, 1), 1);
2173
2174 /* Checking op0, forbid using static chain register ($r16)
2175 on reduced-set registers configuration. */
2176 if (TARGET_REDUCED_REGS
2177 && REG_P (op0)
2178 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2179 sorry ("a nested function is not supported for reduced registers");
2180 /* Checking op1, forbid using static chain register ($r16)
2181 on reduced-set registers configuration. */
2182 if (TARGET_REDUCED_REGS
2183 && REG_P (op1)
2184 && REGNO (op1) == STATIC_CHAIN_REGNUM)
2185 sorry ("a nested function is not supported for reduced registers");
2186
2187 if (REG_P (op0) && REG_P (op1))
2188 {
2189 /* [Ra], Rb */
2190 fprintf (stream, "[%s], %s",
2191 reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
2192 }
2193 else if (REG_P (op0) && CONST_INT_P (op1))
2194 {
2195 /* [Ra], imm */
2196 fprintf (stream, "[%s], %d",
2197 reg_names[REGNO (op0)], (int)INTVAL (op1));
2198 }
2199 else
2200 {
2201 /* The control flow is not supposed to be here. */
2202 debug_rtx (x);
2203 gcc_unreachable ();
2204 }
2205
2206 break;
2207
2208 case POST_INC:
2209 case POST_DEC:
2210 op0 = XEXP (x, 0);
2211
2212 /* Checking op0, forbid using static chain register ($r16)
2213 on reduced-set registers configuration. */
2214 if (TARGET_REDUCED_REGS
2215 && REG_P (op0)
2216 && REGNO (op0) == STATIC_CHAIN_REGNUM)
2217 sorry ("a nested function is not supported for reduced registers");
2218
2219 if (REG_P (op0))
2220 {
2221 /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
2222 The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
2223 We only need to deal with register Ra. */
2224 fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
2225 }
2226 else
2227 {
2228 /* The control flow is not supposed to be here. */
2229 debug_rtx (x);
2230 gcc_unreachable ();
2231 }
2232
2233 break;
2234
2235 default :
2236 /* Generally, output_addr_const () is able to handle most cases.
2237 We want to see what CODE could appear,
2238 so we use gcc_unreachable() to stop it. */
2239 debug_rtx (x);
2240 gcc_unreachable ();
2241 break;
2242 }
2243 }
2244
2245 \f
2246 /* Defining target-specific uses of __attribute__. */
2247
2248 /* Add some checking after merging attributes. */
2249 static tree
2250 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
2251 {
2252 tree combined_attrs;
2253
2254 /* Create combined attributes. */
2255 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
2256 DECL_ATTRIBUTES (newdecl));
2257
2258 /* Since newdecl is actually a duplicate of olddecl,
2259 we can take olddecl for some operations. */
2260 if (TREE_CODE (olddecl) == FUNCTION_DECL)
2261 {
2262 /* Check isr-specific attributes conflict. */
2263 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
2264 }
2265
2266 return combined_attrs;
2267 }
2268
2269 /* Add some checking when inserting attributes. */
2270 static void
2271 nds32_insert_attributes (tree decl, tree *attributes)
2272 {
2273 /* For function declaration, we need to check isr-specific attributes:
2274 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
2275 2. Check valid integer value for interrupt/exception.
2276 3. Check valid integer value for reset.
2277 4. Check valid function for nmi/warm. */
2278 if (TREE_CODE (decl) == FUNCTION_DECL)
2279 {
2280 tree func_attrs;
2281 tree intr, excp, reset;
2282
2283 /* Pick up function attributes. */
2284 func_attrs = *attributes;
2285
2286 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
2287 nds32_check_isr_attrs_conflict (decl, func_attrs);
2288
2289 /* Now we are starting to check valid id value
2290 for interrupt/exception/reset.
2291 Note that we ONLY check its validity here.
2292 To construct isr vector information, it is still performed
2293 by nds32_construct_isr_vectors_information(). */
2294 intr = lookup_attribute ("interrupt", func_attrs);
2295 excp = lookup_attribute ("exception", func_attrs);
2296 reset = lookup_attribute ("reset", func_attrs);
2297
2298 if (intr || excp)
2299 {
2300 /* Deal with interrupt/exception. */
2301 tree id_list;
2302 unsigned int lower_bound, upper_bound;
2303
2304 /* The way to handle interrupt or exception is the same,
2305 we just need to take care of actual vector number.
2306 For interrupt(0..63), the actual vector number is (9..72).
2307 For exception(1..8), the actual vector number is (1..8). */
2308 lower_bound = (intr) ? (0) : (1);
2309 upper_bound = (intr) ? (63) : (8);
2310
2311 /* Prepare id list so that we can traverse id value. */
2312 id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));
2313
2314 /* 2. Check valid integer value for interrupt/exception. */
2315 while (id_list)
2316 {
2317 tree id;
2318
2319 /* Pick up each vector id value. */
2320 id = TREE_VALUE (id_list);
2321 /* Issue error if it is not a valid integer value. */
2322 if (TREE_CODE (id) != INTEGER_CST
2323 || wi::ltu_p (id, lower_bound)
2324 || wi::gtu_p (id, upper_bound))
2325 error ("invalid id value for interrupt/exception attribute");
2326
2327 /* Advance to next id. */
2328 id_list = TREE_CHAIN (id_list);
2329 }
2330 }
2331 else if (reset)
2332 {
2333 /* Deal with reset. */
2334 tree id_list;
2335 tree id;
2336 tree nmi, warm;
2337 unsigned int lower_bound;
2338 unsigned int upper_bound;
2339
2340 /* Prepare id_list and identify id value so that
2341 we can check if total number of vectors is valid. */
2342 id_list = TREE_VALUE (reset);
2343 id = TREE_VALUE (id_list);
2344
2345 /* The maximum number of user interrupt vectors is 64. */
2346 lower_bound = 0;
2347 upper_bound = 64;
2348
2349 /* 3. Check valid integer value for reset. */
2350 if (TREE_CODE (id) != INTEGER_CST
2351 || wi::ltu_p (id, lower_bound)
2352 || wi::gtu_p (id, upper_bound))
2353 error ("invalid id value for reset attribute");
2354
2355 /* 4. Check valid function for nmi/warm. */
2356 nmi = lookup_attribute ("nmi", func_attrs);
2357 warm = lookup_attribute ("warm", func_attrs);
2358
2359 if (nmi != NULL_TREE)
2360 {
2361 tree nmi_func_list;
2362 tree nmi_func;
2363
2364 nmi_func_list = TREE_VALUE (nmi);
2365 nmi_func = TREE_VALUE (nmi_func_list);
2366
2367 /* Issue error if it is not a valid nmi function. */
2368 if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
2369 error ("invalid nmi function for reset attribute");
2370 }
2371
2372 if (warm != NULL_TREE)
2373 {
2374 tree warm_func_list;
2375 tree warm_func;
2376
2377 warm_func_list = TREE_VALUE (warm);
2378 warm_func = TREE_VALUE (warm_func_list);
2379
2380 /* Issue error if it is not a valid warm function. */
2381 if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
2382 error ("invalid warm function for reset attribute");
2383 }
2384 }
2385 else
2386 {
2387 /* No interrupt, exception, or reset attribute is set. */
2388 return;
2389 }
2390 }
2391 }
2392
2393 static bool
2394 nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
2395 tree pop_target ATTRIBUTE_UNUSED)
2396 {
2397 /* Currently, we do not parse any target pragma ourselves,
2398 so simply return false. */
2399 return false;
2400 }
2401
2402 static void
2403 nds32_option_override (void)
2404 {
2405 /* After all the command options have been parsed,
2406 we shall deal with some flags for changing compiler settings. */
2407
2408 /* At first, we check if we have to strictly
2409 set some flags based on ISA family. */
2410 if (TARGET_ISA_V2)
2411 {
2412 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
2413 target_flags &= ~MASK_V3PUSH;
2414 }
2415 if (TARGET_ISA_V3)
2416 {
2417 /* Under V3 ISA, currently nothing should be strictly set. */
2418 }
2419 if (TARGET_ISA_V3M)
2420 {
2421 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
2422 target_flags |= MASK_REDUCED_REGS;
2423 /* Under V3M ISA, we need to strictly disable TARGET_PERF_EXT. */
2424 target_flags &= ~MASK_PERF_EXT;
2425 }
2426
2427 /* See if we are using reduced-set registers:
2428 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
2429 If so, we must forbid using $r11~$r14, $r16~$r27. */
2430 if (TARGET_REDUCED_REGS)
2431 {
2432 int r;
2433
2434 /* Prevent the register allocator from
2435 choosing these registers during register allocation. */
2436 for (r = 11; r <= 14; r++)
2437 fixed_regs[r] = call_used_regs[r] = 1;
2438 for (r = 16; r <= 27; r++)
2439 fixed_regs[r] = call_used_regs[r] = 1;
2440 }
2441
2442 /* See if user explicitly would like to use fp-as-gp optimization.
2443 If so, we must prevent $fp from being allocated
2444 during register allocation. */
2445 if (TARGET_FORCE_FP_AS_GP)
2446 fixed_regs[FP_REGNUM] = call_used_regs[FP_REGNUM] = 1;
2447
2448 if (!TARGET_16_BIT)
2449 {
2450 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
2451 target_flags &= ~MASK_V3PUSH;
2452 }
2453
2454 /* Currently, we don't support PIC code generation yet. */
2455 if (flag_pic)
2456 sorry ("not support -fpic");
2457 }
2458
2459 \f
2460 /* Miscellaneous Parameters. */
2461
2462 static void
2463 nds32_init_builtins (void)
2464 {
2465 nds32_init_builtins_impl ();
2466 }
2467
2468 static rtx
2469 nds32_expand_builtin (tree exp,
2470 rtx target,
2471 rtx subtarget,
2472 enum machine_mode mode,
2473 int ignore)
2474 {
2475 return nds32_expand_builtin_impl (exp, target, subtarget, mode, ignore);
2476 }
2477
2478
2479 /* ------------------------------------------------------------------------ */
2480
2481 /* PART 4: Implement extern function definitions,
2482 the prototype is in nds32-protos.h. */
2483 \f
2484 /* Defining Data Structures for Per-function Information. */
2485
2486 void
2487 nds32_init_expanders (void)
2488 {
2489 /* Arrange to initialize and mark the machine per-function status. */
2490 init_machine_status = nds32_init_machine_status;
2491 }
2492
2493 \f
2494 /* Register Usage. */
2495
2496 /* -- How Values Fit in Registers. */
2497
2498 int
2499 nds32_hard_regno_nregs (int regno ATTRIBUTE_UNUSED,
2500 enum machine_mode mode)
2501 {
2502 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
2503 }
2504
2505 int
2506 nds32_hard_regno_mode_ok (int regno, enum machine_mode mode)
2507 {
2508 /* Restrict double-word quantities to even register pairs. */
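/* For example, a DImode value (two SImode words) may start at $r0 or $r2
   (even register numbers) but not at $r1, while an SImode value, which
   occupies a single register, is allowed in any register. */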
2509 if (HARD_REGNO_NREGS (regno, mode) == 1
2510 || !((regno) & 1))
2511 return 1;
2512
2513 return 0;
2514 }
2515
2516 \f
2517 /* Register Classes. */
2518
2519 enum reg_class
2520 nds32_regno_reg_class (int regno)
2521 {
2522 /* Refer to nds32.h for more register class details. */
2523
2524 if (regno >= 0 && regno <= 7)
2525 return LOW_REGS;
2526 else if (regno >= 8 && regno <= 11)
2527 return MIDDLE_REGS;
2528 else if (regno >= 12 && regno <= 14)
2529 return HIGH_REGS;
2530 else if (regno == 15)
2531 return R15_TA_REG;
2532 else if (regno >= 16 && regno <= 19)
2533 return MIDDLE_REGS;
2534 else if (regno >= 20 && regno <= 31)
2535 return HIGH_REGS;
2536 else if (regno == 32 || regno == 33)
2537 return FRAME_REGS;
2538 else
2539 return NO_REGS;
2540 }
2541
2542 \f
2543 /* Stack Layout and Calling Conventions. */
2544
2545 /* -- Basic Stack Layout. */
2546
2547 rtx
2548 nds32_return_addr_rtx (int count,
2549 rtx frameaddr ATTRIBUTE_UNUSED)
2550 {
2551 /* There is no way to determine the return address
2552 if frameaddr refers to a frame that is 'count' steps
2553 up from the current frame. */
2554 if (count != 0)
2555 return NULL_RTX;
2556
2557 /* If count == 0, it means we are at current frame,
2558 the return address is $r30 ($lp). */
2559 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
2560 }
2561
2562 /* -- Eliminating Frame Pointer and Arg Pointer. */
2563
2564 HOST_WIDE_INT
2565 nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
2566 {
2567 HOST_WIDE_INT offset;
2568
2569 /* Compute and setup stack frame size.
2570 The result will be in cfun->machine. */
2571 nds32_compute_stack_frame ();
2572
2573 /* Remember to consider
2574 cfun->machine->callee_saved_area_padding_bytes
2575 when calculating offset. */
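/* A purely illustrative example: if $fp, $gp and $lp are all saved
   (4 bytes each), callee_saved_regs_size == 8, padding == 0,
   local_size == 16 and out_args_size == 8, then the offset from
   ARG_POINTER to STACK_POINTER is 4 + 4 + 4 + 8 + 0 + 16 + 8 == 44,
   and the offset from FRAME_POINTER to STACK_POINTER is 16 + 8 == 24. */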
2576 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
2577 {
2578 offset = (cfun->machine->fp_size
2579 + cfun->machine->gp_size
2580 + cfun->machine->lp_size
2581 + cfun->machine->callee_saved_regs_size
2582 + cfun->machine->callee_saved_area_padding_bytes
2583 + cfun->machine->local_size
2584 + cfun->machine->out_args_size);
2585 }
2586 else if (from_reg == ARG_POINTER_REGNUM
2587 && to_reg == HARD_FRAME_POINTER_REGNUM)
2588 {
2589 offset = 0;
2590 }
2591 else if (from_reg == FRAME_POINTER_REGNUM
2592 && to_reg == STACK_POINTER_REGNUM)
2593 {
2594 offset = (cfun->machine->local_size + cfun->machine->out_args_size);
2595 }
2596 else if (from_reg == FRAME_POINTER_REGNUM
2597 && to_reg == HARD_FRAME_POINTER_REGNUM)
2598 {
2599 offset = (-1) * (cfun->machine->fp_size
2600 + cfun->machine->gp_size
2601 + cfun->machine->lp_size
2602 + cfun->machine->callee_saved_regs_size
2603 + cfun->machine->callee_saved_area_padding_bytes);
2604 }
2605 else
2606 {
2607 gcc_unreachable ();
2608 }
2609
2610 return offset;
2611 }
2612
2613 /* -- Passing Arguments in Registers. */
2614
2615 void
2616 nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
2617 tree fntype ATTRIBUTE_UNUSED,
2618 rtx libname ATTRIBUTE_UNUSED,
2619 tree fndecl ATTRIBUTE_UNUSED,
2620 int n_named_args ATTRIBUTE_UNUSED)
2621 {
2622 /* Initial available registers
2623 (in offset, corresponding to NDS32_GPR_ARG_FIRST_REGNUM)
2624 for passing arguments. */
2625 cum->reg_offset = 0;
2626 }
2627
2628 /* -- Function Entry and Exit. */
2629
2630 /* Function for normal multiple push prologue. */
2631 void
2632 nds32_expand_prologue (void)
2633 {
2634 int fp_adjust;
2635 int sp_adjust;
2636 int en4_const;
2637
2638 rtx Rb, Re;
2639 rtx push_insn;
2640 rtx fp_adjust_insn, sp_adjust_insn;
2641
2642 /* Before computing everything for stack frame size,
2643 we check if it is still worth to use fp_as_gp optimization.
2644 If it is, the 'df_regs_ever_live_p (FP_REGNUM)' will be set
2645 so that $fp will be saved on stack. */
2646 cfun->machine->fp_as_gp_p = nds32_fp_as_gp_check_available ();
2647
2648 /* Compute and setup stack frame size.
2649 The result will be in cfun->machine. */
2650 nds32_compute_stack_frame ();
2651
2652 /* If the function is 'naked',
2653 we do not have to generate prologue code fragment. */
2654 if (cfun->machine->naked_p)
2655 return;
2656
2657 /* Get callee_first_regno and callee_last_regno. */
2658 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
2659 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);
2660
2661 /* push_insn = gen_stack_push_multiple(first_regno, last_regno),
2662 the pattern 'stack_push_multiple' is implemented in nds32.md.
2663 For En4 field, we have to calculate its constant value.
2664 Refer to Andes ISA for more information. */
2665 en4_const = 0;
2666 if (cfun->machine->fp_size)
2667 en4_const += 8;
2668 if (cfun->machine->gp_size)
2669 en4_const += 4;
2670 if (cfun->machine->lp_size)
2671 en4_const += 2;
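/* Illustrative arithmetic only: a function that must save $fp and $lp
   but not $gp would get en4_const = 8 + 2 = 10. */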
2672
2673 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2674 to be saved, we don't have to create a multiple push instruction.
2675 Otherwise, a multiple push instruction is needed. */
2676 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
2677 {
2678 /* Create multiple push instruction rtx. */
2679 push_insn = nds32_gen_stack_push_multiple (Rb, Re, GEN_INT (en4_const));
2680 /* Emit rtx into instructions list and receive INSN rtx form. */
2681 push_insn = emit_insn (push_insn);
2682
2683 /* The insn rtx 'push_insn' will change frame layout.
2684 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2685 generate CFI (Call Frame Information) stuff. */
2686 RTX_FRAME_RELATED_P (push_insn) = 1;
2687 }
2688
2689 /* Check frame_pointer_needed to see
2690 if we shall emit fp adjustment instruction. */
2691 if (frame_pointer_needed)
2692 {
2693 /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
2694 + (4 * callee-saved-registers)
2695 Note: No need to adjust
2696 cfun->machine->callee_saved_area_padding_bytes,
2697 because, at this point, stack pointer is just
2698 at the position after push instruction. */
2699 fp_adjust = cfun->machine->fp_size
2700 + cfun->machine->gp_size
2701 + cfun->machine->lp_size
2702 + cfun->machine->callee_saved_regs_size;
2703 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2704 stack_pointer_rtx,
2705 GEN_INT (fp_adjust));
2706 /* Emit rtx into instructions list and receive INSN rtx form. */
2707 fp_adjust_insn = emit_insn (fp_adjust_insn);
2708 }
2709
2710 /* Adjust $sp = $sp - local_size - out_args_size
2711 - callee_saved_area_padding_bytes. */
2712 sp_adjust = cfun->machine->local_size
2713 + cfun->machine->out_args_size
2714 + cfun->machine->callee_saved_area_padding_bytes;
2715 /* sp_adjust value may be out of range of the addi instruction,
2716 create alternative add behavior with TA_REGNUM if necessary,
2717 using NEGATIVE value to tell that we are decreasing address. */
2718 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
2719 if (sp_adjust)
2720 {
2721 /* Generate sp adjustment instruction if and only if sp_adjust != 0. */
2722 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2723 stack_pointer_rtx,
2724 GEN_INT (-1 * sp_adjust));
2725 /* Emit rtx into instructions list and receive INSN rtx form. */
2726 sp_adjust_insn = emit_insn (sp_adjust_insn);
2727
2728 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2729 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2730 generate CFI (Call Frame Information) stuff. */
2731 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
2732 }
2733
2734 /* Prevent the instruction scheduler from
2735 moving instructions across the boundary. */
2736 emit_insn (gen_blockage ());
2737 }
2738
2739 /* Function for normal multiple pop epilogue. */
2740 void
2741 nds32_expand_epilogue (void)
2742 {
2743 int sp_adjust;
2744 int en4_const;
2745
2746 rtx Rb, Re;
2747 rtx pop_insn;
2748 rtx sp_adjust_insn;
2749
2750 /* Compute and setup stack frame size.
2751 The result will be in cfun->machine. */
2752 nds32_compute_stack_frame ();
2753
2754 /* Prevent the instruction scheduler from
2755 moving instructions across the boundary. */
2756 emit_insn (gen_blockage ());
2757
2758 /* If the function is 'naked', we do not have to generate any
2759 epilogue code fragment except the 'ret' instruction. */
2760 if (cfun->machine->naked_p)
2761 {
2762 /* Generate return instruction by using
2763 unspec_volatile_func_return pattern.
2764 Make sure this instruction is after gen_blockage().
2765 NOTE that $lp will become 'live'
2766 after this instruction has been emitted. */
2767 emit_insn (gen_unspec_volatile_func_return ());
2768 return;
2769 }
2770
2771 if (frame_pointer_needed)
2772 {
2773 /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
2774 - (4 * callee-saved-registers)
2775 Note: No need to adjust
2776 cfun->machine->callee_saved_area_padding_bytes,
2777 because we want to adjust stack pointer
2778 to the position for pop instruction. */
2779 sp_adjust = cfun->machine->fp_size
2780 + cfun->machine->gp_size
2781 + cfun->machine->lp_size
2782 + cfun->machine->callee_saved_regs_size;
2783 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2784 hard_frame_pointer_rtx,
2785 GEN_INT (-1 * sp_adjust));
2786 /* Emit rtx into instructions list and receive INSN rtx form. */
2787 sp_adjust_insn = emit_insn (sp_adjust_insn);
2788 }
2789 else
2790 {
2791 /* If frame pointer is NOT needed,
2792 we cannot calculate the sp adjustment from frame pointer.
2793 Instead, we calculate the adjustment by local_size,
2794 out_args_size, and callee_saved_area_padding_bytes.
2795 Notice that such sp adjustment value may be out of range,
2796 so we have to deal with it as well. */
2797
2798 /* Adjust $sp = $sp + local_size + out_args_size
2799 + callee_saved_area_padding_bytes. */
2800 sp_adjust = cfun->machine->local_size
2801 + cfun->machine->out_args_size
2802 + cfun->machine->callee_saved_area_padding_bytes;
2803 /* sp_adjust value may be out of range of the addi instruction,
2804 create alternative add behavior with TA_REGNUM if necessary,
2805 using POSITIVE value to tell that we are increasing address. */
2806 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
2807 if (sp_adjust)
2808 {
2809 /* Generate sp adjustment instruction
2810 if and only if sp_adjust != 0. */
2811 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2812 stack_pointer_rtx,
2813 GEN_INT (sp_adjust));
2814 /* Emit rtx into instructions list and receive INSN rtx form. */
2815 sp_adjust_insn = emit_insn (sp_adjust_insn);
2816 }
2817 }
2818
2819 /* Get callee_first_regno and callee_last_regno. */
2820 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
2821 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);
2822
2823 /* pop_insn = gen_stack_pop_multiple(first_regno, last_regno),
2824 the pattern 'stack_pop_multiple' is implemented in nds32.md.
2825 For En4 field, we have to calculate its constant value.
2826 Refer to Andes ISA for more information. */
2827 en4_const = 0;
2828 if (cfun->machine->fp_size)
2829 en4_const += 8;
2830 if (cfun->machine->gp_size)
2831 en4_const += 4;
2832 if (cfun->machine->lp_size)
2833 en4_const += 2;
2834
2835 /* If $fp, $gp, $lp, and all callee-save registers are NOT required
2836 to be saved, we don't have to create a multiple pop instruction.
2837 Otherwise, a multiple pop instruction is needed. */
2838 if (!(REGNO (Rb) == SP_REGNUM && REGNO (Re) == SP_REGNUM && en4_const == 0))
2839 {
2840 /* Create multiple pop instruction rtx. */
2841 pop_insn = nds32_gen_stack_pop_multiple (Rb, Re, GEN_INT (en4_const));
2842 /* Emit pop instruction. */
2843 emit_insn (pop_insn);
2844 }
2845
2846 /* Generate return instruction by using
2847 unspec_volatile_func_return pattern. */
2848 emit_insn (gen_unspec_volatile_func_return ());
2849 }
2850
2851 /* Function for v3push prologue. */
2852 void
2853 nds32_expand_prologue_v3push (void)
2854 {
2855 int fp_adjust;
2856 int sp_adjust;
2857
2858 rtx Rb, Re;
2859 rtx push_insn;
2860 rtx fp_adjust_insn, sp_adjust_insn;
2861
2862 /* Before computing everything for stack frame size,
2863 we check if it is still worth to use fp_as_gp optimization.
2864 If it is, the 'df_regs_ever_live_p (FP_REGNUM)' will be set
2865 so that $fp will be saved on stack. */
2866 cfun->machine->fp_as_gp_p = nds32_fp_as_gp_check_available ();
2867
2868 /* Compute and setup stack frame size.
2869 The result will be in cfun->machine. */
2870 nds32_compute_stack_frame ();
2871
2872 /* If the function is 'naked',
2873 we do not have to generate prologue code fragment. */
2874 if (cfun->machine->naked_p)
2875 return;
2876
2877 /* Get callee_first_regno and callee_last_regno. */
2878 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
2879 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);
2880
2881 /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
2882 where imm8u has to be 8-byte aligned. */
2883 sp_adjust = cfun->machine->local_size
2884 + cfun->machine->out_args_size
2885 + cfun->machine->callee_saved_area_padding_bytes;
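/* For instance, if sp_adjust computes to 24, which satisfies the Iu08
   constraint checked below and is 8-byte aligned, we emit 'push25 Re,24'
   and, when the frame pointer is needed, fold that 24 into fp_adjust;
   otherwise we fall back to 'push25 Re,0' plus a separate $sp adjustment. */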
2886
2887 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
2888 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
2889 {
2890 /* We can use 'push25 Re,imm8u'. */
2891
2892 /* push_insn = gen_stack_v3push(last_regno, sp_adjust),
2893 the pattern 'stack_v3push' is implemented in nds32.md.
2894 The (const_int 14) means v3push always push { $fp $gp $lp }. */
2895 push_insn = nds32_gen_stack_v3push (Rb, Re,
2896 GEN_INT (14), GEN_INT (sp_adjust));
2897 /* emit rtx into instructions list and receive INSN rtx form */
2898 push_insn = emit_insn (push_insn);
2899
2900 /* The insn rtx 'push_insn' will change frame layout.
2901 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2902 generate CFI (Call Frame Information) stuff. */
2903 RTX_FRAME_RELATED_P (push_insn) = 1;
2904
2905 /* Check frame_pointer_needed to see
2906 if we shall emit fp adjustment instruction. */
2907 if (frame_pointer_needed)
2908 {
2909 /* adjust $fp = $sp + 4 ($fp size)
2910 + 4 ($gp size)
2911 + 4 ($lp size)
2912 + (4 * n) (callee-saved registers)
2913 + sp_adjust ('push25 Re,imm8u')
2914 Note: Since we use 'push25 Re,imm8u',
2915 the position of stack pointer is further
2916 changed after push instruction.
2917 Hence, we need to take sp_adjust value
2918 into consideration. */
2919 fp_adjust = cfun->machine->fp_size
2920 + cfun->machine->gp_size
2921 + cfun->machine->lp_size
2922 + cfun->machine->callee_saved_regs_size
2923 + sp_adjust;
2924 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2925 stack_pointer_rtx,
2926 GEN_INT (fp_adjust));
2927 /* Emit rtx into instructions list and receive INSN rtx form. */
2928 fp_adjust_insn = emit_insn (fp_adjust_insn);
2929 }
2930 }
2931 else
2932 {
2933 /* We have to use 'push25 Re,0' and
2934 expand one more instruction to adjust $sp later. */
2935
2936 /* push_insn = gen_stack_v3push(last_regno, sp_adjust),
2937 the pattern 'stack_v3push' is implemented in nds32.md.
2938 The (const_int 14) means v3push always push { $fp $gp $lp }. */
2939 push_insn = nds32_gen_stack_v3push (Rb, Re,
2940 GEN_INT (14), GEN_INT (0));
2941 /* Emit rtx into instructions list and receive INSN rtx form. */
2942 push_insn = emit_insn (push_insn);
2943
2944 /* The insn rtx 'push_insn' will change frame layout.
2945 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2946 generate CFI (Call Frame Information) stuff. */
2947 RTX_FRAME_RELATED_P (push_insn) = 1;
2948
2949 /* Check frame_pointer_needed to see
2950 if we shall emit fp adjustment instruction. */
2951 if (frame_pointer_needed)
2952 {
2953 /* adjust $fp = $sp + 4 ($fp size)
2954 + 4 ($gp size)
2955 + 4 ($lp size)
2956 + (4 * n) (callee-saved registers)
2957 Note: Since we use 'push25 Re,0',
2958 the stack pointer is just at the position
2959 after push instruction.
2960 No need to take sp_adjust into consideration. */
2961 fp_adjust = cfun->machine->fp_size
2962 + cfun->machine->gp_size
2963 + cfun->machine->lp_size
2964 + cfun->machine->callee_saved_regs_size;
2965 fp_adjust_insn = gen_addsi3 (hard_frame_pointer_rtx,
2966 stack_pointer_rtx,
2967 GEN_INT (fp_adjust));
2968 /* Emit rtx into instructions list and receive INSN rtx form. */
2969 fp_adjust_insn = emit_insn (fp_adjust_insn);
2970 }
2971
2972 /* Because we use 'push25 Re,0',
2973 we need to expand one more instruction to adjust $sp.
2974 However, sp_adjust value may be out of range of the addi instruction,
2975 create alternative add behavior with TA_REGNUM if necessary,
2976 using NEGATIVE value to tell that we are decreasing address. */
2977 sp_adjust = nds32_force_addi_stack_int ( (-1) * sp_adjust);
2978 if (sp_adjust)
2979 {
2980 /* Generate sp adjustment instruction
2981 if and only if sp_adjust != 0. */
2982 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
2983 stack_pointer_rtx,
2984 GEN_INT (-1 * sp_adjust));
2985 /* Emit rtx into instructions list and receive INSN rtx form. */
2986 sp_adjust_insn = emit_insn (sp_adjust_insn);
2987
2988 /* The insn rtx 'sp_adjust_insn' will change frame layout.
2989 We need to use RTX_FRAME_RELATED_P so that GCC is able to
2990 generate CFI (Call Frame Information) stuff. */
2991 RTX_FRAME_RELATED_P (sp_adjust_insn) = 1;
2992 }
2993 }
2994
2995 /* Prevent the instruction scheduler from
2996 moving instructions across the boundary. */
2997 emit_insn (gen_blockage ());
2998 }
2999
3000 /* Function for v3pop epilogue. */
3001 void
3002 nds32_expand_epilogue_v3pop (void)
3003 {
3004 int sp_adjust;
3005
3006 rtx Rb, Re;
3007 rtx pop_insn;
3008 rtx sp_adjust_insn;
3009
3010 /* Compute and setup stack frame size.
3011 The result will be in cfun->machine. */
3012 nds32_compute_stack_frame ();
3013
3014 /* Prevent the instruction scheduler from
3015 moving instructions across the boundary. */
3016 emit_insn (gen_blockage ());
3017
3018 /* If the function is 'naked', we do not have to generate any
3019 epilogue code fragment except the 'ret' instruction. */
3020 if (cfun->machine->naked_p)
3021 {
3022 /* Generate return instruction by using
3023 unspec_volatile_func_return pattern.
3024 Make sure this instruction is after gen_blockage().
3025 NOTE that $lp will become 'live'
3026 after this instruction has been emitted. */
3027 emit_insn (gen_unspec_volatile_func_return ());
3028 return;
3029 }
3030
3031 /* Get callee_first_regno and callee_last_regno. */
3032 Rb = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_first_regno);
3033 Re = gen_rtx_REG (SImode, cfun->machine->callee_saved_regs_last_regno);
3034
3035 /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
3036 where imm8u has to be 8-byte aligned. */
3037 sp_adjust = cfun->machine->local_size
3038 + cfun->machine->out_args_size
3039 + cfun->machine->callee_saved_area_padding_bytes;
3040
3041 /* We have to consider the alloca issue as well.
3042 If the function does call alloca(), the stack pointer is not fixed.
3043 In that case, we cannot use 'pop25 Re,imm8u' directly.
3044 We have to calculate the stack pointer from the frame pointer
3045 and then use 'pop25 Re,0'.
3046 Of course, the frame_pointer_needed should be nonzero
3047 if the function calls alloca(). */
3048 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
3049 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
3050 && !cfun->calls_alloca)
3051 {
3052 /* We can use 'pop25 Re,imm8u'. */
3053
3054 /* pop_insn = gen_stack_v3pop(last_regno, sp_adjust),
3055 the pattern 'stack_v3pop' is implemented in nds32.md.
3056 The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3057 pop_insn = nds32_gen_stack_v3pop (Rb, Re,
3058 GEN_INT (14), GEN_INT (sp_adjust));
3059
3060 /* Emit pop instruction. */
3061 emit_insn (pop_insn);
3062 }
3063 else
3064 {
3065 /* We have to use 'pop25 Re,0', and prior to it,
3066 we must expand one more instruction to adjust $sp. */
3067
3068 if (frame_pointer_needed)
3069 {
3070 /* adjust $sp = $fp - 4 ($fp size)
3071 - 4 ($gp size)
3072 - 4 ($lp size)
3073 - (4 * n) (callee-saved registers)
3074 Note: No need to adjust
3075 cfun->machine->callee_saved_area_padding_bytes,
3076 because we want to adjust stack pointer
3077 to the position for pop instruction. */
3078 sp_adjust = cfun->machine->fp_size
3079 + cfun->machine->gp_size
3080 + cfun->machine->lp_size
3081 + cfun->machine->callee_saved_regs_size;
3082 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3083 hard_frame_pointer_rtx,
3084 GEN_INT (-1 * sp_adjust));
3085 /* Emit rtx into instructions list and receive INSN rtx form. */
3086 sp_adjust_insn = emit_insn (sp_adjust_insn);
3087 }
3088 else
3089 {
3090 /* If frame pointer is NOT needed,
3091 we cannot calculate the sp adjustment from frame pointer.
3092 Instead, we calculate the adjustment by local_size,
3093 out_args_size, and callee_saved_area_padding_bytes.
3094 Notice that such sp adjustment value may be out of range,
3095 so we have to deal with it as well. */
3096
3097 /* Adjust $sp = $sp + local_size + out_args_size
3098 + callee_saved_area_padding_bytes. */
3099 sp_adjust = cfun->machine->local_size
3100 + cfun->machine->out_args_size
3101 + cfun->machine->callee_saved_area_padding_bytes;
3102 /* sp_adjust value may be out of range of the addi instruction,
3103 create alternative add behavior with TA_REGNUM if necessary,
3104 using POSITIVE value to tell that we are increasing address. */
3105 sp_adjust = nds32_force_addi_stack_int (sp_adjust);
3106 if (sp_adjust)
3107 {
3108 /* Generate sp adjustment instruction
3109 if and only if sp_adjust != 0. */
3110 sp_adjust_insn = gen_addsi3 (stack_pointer_rtx,
3111 stack_pointer_rtx,
3112 GEN_INT (sp_adjust));
3113 /* Emit rtx into instructions list and receive INSN rtx form. */
3114 sp_adjust_insn = emit_insn (sp_adjust_insn);
3115 }
3116 }
3117
3118 /* pop_insn = gen_stack_v3pop(last_regno, sp_adjust),
3119 the pattern 'stack_v3pop' is implemented in nds32.md. */
3120 /* The (const_int 14) means v3pop always pop { $fp $gp $lp }. */
3121 pop_insn = nds32_gen_stack_v3pop (Rb, Re,
3122 GEN_INT (14), GEN_INT (0));
3123
3124 /* Emit pop instruction. */
3125 emit_insn (pop_insn);
3126 }
3127 }
3128
3129 /* ------------------------------------------------------------------------ */
3130
3131 /* Function to test the 333-form for load/store instructions.
3132 This is an auxiliary extern function for an auxiliary macro in nds32.h.
3133 Because it is a little complicated, we use a function instead of a macro. */
3134 bool
3135 nds32_ls_333_p (rtx rt, rtx ra, rtx imm, enum machine_mode mode)
3136 {
3137 if (REGNO_REG_CLASS (REGNO (rt)) == LOW_REGS
3138 && REGNO_REG_CLASS (REGNO (ra)) == LOW_REGS)
3139 {
3140 if (GET_MODE_SIZE (mode) == 4)
3141 return satisfies_constraint_Iu05 (imm);
3142
3143 if (GET_MODE_SIZE (mode) == 2)
3144 return satisfies_constraint_Iu04 (imm);
3145
3146 if (GET_MODE_SIZE (mode) == 1)
3147 return satisfies_constraint_Iu03 (imm);
3148 }
3149
3150 return false;
3151 }
3152
3153
3154 /* Functions to expand load_multiple and store_multiple.
3155 They are auxiliary extern functions to help create rtx templates.
3156 Check nds32-multiple.md file for the patterns. */
3157 rtx
3158 nds32_expand_load_multiple (int base_regno, int count,
3159 rtx base_addr, rtx basemem)
3160 {
3161 int par_index;
3162 int offset;
3163 rtx result;
3164 rtx new_addr, mem, reg;
3165
3166 /* Create the pattern that is presented in nds32-multiple.md. */
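/* For example, with base_regno == 0, count == 2 and base_addr in $r8,
   the PARALLEL built below is essentially:
     (parallel [(set (reg:SI 0) (mem:SI (reg:SI 8)))
                (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 8) (const_int 4))))]) */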
3167
3168 result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
3169
3170 for (par_index = 0; par_index < count; par_index++)
3171 {
3172 offset = par_index * 4;
3173 /* 4-byte for loading data to each register. */
3174 new_addr = plus_constant (Pmode, base_addr, offset);
3175 mem = adjust_automodify_address_nv (basemem, SImode,
3176 new_addr, offset);
3177 reg = gen_rtx_REG (SImode, base_regno + par_index);
3178
3179 XVECEXP (result, 0, par_index) = gen_rtx_SET (VOIDmode, reg, mem);
3180 }
3181
3182 return result;
3183 }
3184
3185 rtx
3186 nds32_expand_store_multiple (int base_regno, int count,
3187 rtx base_addr, rtx basemem)
3188 {
3189 int par_index;
3190 int offset;
3191 rtx result;
3192 rtx new_addr, mem, reg;
3193
3194 /* Create the pattern that is presented in nds32-multiple.md. */
3195
3196 result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
3197
3198 for (par_index = 0; par_index < count; par_index++)
3199 {
3200 offset = par_index * 4;
3201 /* 4-byte for storing data to memory. */
3202 new_addr = plus_constant (Pmode, base_addr, offset);
3203 mem = adjust_automodify_address_nv (basemem, SImode,
3204 new_addr, offset);
3205 reg = gen_rtx_REG (SImode, base_regno + par_index);
3206
3207 XVECEXP (result, 0, par_index) = gen_rtx_SET (VOIDmode, mem, reg);
3208 }
3209
3210 return result;
3211 }
3212
3213 /* Function to move block memory content by
3214 using load_multiple and store_multiple.
3215 This is an auxiliary extern function to help create rtx templates.
3216 Check nds32-multiple.md file for the patterns. */
3217 int
3218 nds32_expand_movmemqi (rtx dstmem, rtx srcmem, rtx total_bytes, rtx alignment)
3219 {
3220 HOST_WIDE_INT in_words, out_words;
3221 rtx dst_base_reg, src_base_reg;
3222 int maximum_bytes;
3223
3224 /* Because the reduced register set has few registers
3225 (r0~r5, r6~r10, r15, r28~r31, where 'r15' and 'r28~r31'
3226 cannot be used for register allocation),
3227 using 8 registers (32 bytes) for moving a memory block
3228 may easily consume all of them.
3229 That makes register allocation/spilling hard to work.
3230 So we only allow a maximum of 4 registers (16 bytes) for
3231 moving a memory block under the reduced register set. */
3232 if (TARGET_REDUCED_REGS)
3233 maximum_bytes = 16;
3234 else
3235 maximum_bytes = 32;
3236
3237 /* 1. Total_bytes is integer for sure.
3238 2. Alignment is integer for sure.
3239 3. Maximum 4 or 8 registers, 4 * 4 = 16 bytes, 8 * 4 = 32 bytes.
3240 4. Requires (n * 4) block size.
3241 5. Requires 4-byte alignment. */
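/* For example, a 24-byte, word-aligned block copy with the full register
   set expands into one 6-register load_multiple followed by one
   6-register store_multiple (24 / 4 == 6 words); the same request under
   TARGET_REDUCED_REGS exceeds the 16-byte limit and returns 0 so that
   the generic block-move code is used instead. */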
3242 if (GET_CODE (total_bytes) != CONST_INT
3243 || GET_CODE (alignment) != CONST_INT
3244 || INTVAL (total_bytes) > maximum_bytes
3245 || INTVAL (total_bytes) & 3
3246 || INTVAL (alignment) & 3)
3247 return 0;
3248
3249 dst_base_reg = copy_to_mode_reg (SImode, XEXP (dstmem, 0));
3250 src_base_reg = copy_to_mode_reg (SImode, XEXP (srcmem, 0));
3251
3252 out_words = in_words = INTVAL (total_bytes) / UNITS_PER_WORD;
3253
3254 emit_insn (nds32_expand_load_multiple (0, in_words, src_base_reg, srcmem));
3255 emit_insn (nds32_expand_store_multiple (0, out_words, dst_base_reg, dstmem));
3256
3257 /* Successfully create patterns, return 1. */
3258 return 1;
3259 }
3260
3261 /* Computing the Length of an Insn.
3262 Modifies the length assigned to instruction INSN.
3263 LENGTH is the initially computed length of the insn. */
3264 int
3265 nds32_adjust_insn_length (rtx insn, int length)
3266 {
3267 rtx src, dst;
3268
3269 switch (recog_memoized (insn))
3270 {
3271 case CODE_FOR_move_df:
3272 case CODE_FOR_move_di:
3273 /* Adjust length of movd44 to 2. */
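/* For example, a DImode register-to-register move whose source and
   destination both start at even registers (say $r2:$r3 -> $r0:$r1)
   can be encoded with the 16-bit movd44 form, so its length becomes 2. */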
3274 src = XEXP (PATTERN (insn), 1);
3275 dst = XEXP (PATTERN (insn), 0);
3276
3277 if (REG_P (src)
3278 && REG_P (dst)
3279 && (REGNO (src) % 2) == 0
3280 && (REGNO (dst) % 2) == 0)
3281 length = 2;
3282 break;
3283
3284 default:
3285 break;
3286 }
3287
3288 return length;
3289 }
3290
3291
3292
3293 /* Return true if INSN is a load/store whose addressing mode is SYMBOL_REF
3294 and whose memory mode is SImode. */
3295 bool
3296 nds32_symbol_load_store_p (rtx insn)
3297 {
3298 rtx mem_src = NULL_RTX;
3299
3300 switch (get_attr_type (insn))
3301 {
3302 case TYPE_LOAD:
3303 mem_src = SET_SRC (PATTERN (insn));
3304 break;
3305 case TYPE_STORE:
3306 mem_src = SET_DEST (PATTERN (insn));
3307 break;
3308 default:
3309 break;
3310 }
3311
3312 /* Find a load/store insn whose addressing mode is SYMBOL_REF. */
3313 if (mem_src != NULL_RTX)
3314 {
3315 if ((GET_CODE (mem_src) == ZERO_EXTEND)
3316 || (GET_CODE (mem_src) == SIGN_EXTEND))
3317 mem_src = XEXP (mem_src, 0);
3318
3319 if ((GET_CODE (XEXP (mem_src, 0)) == SYMBOL_REF)
3320 || (GET_CODE (XEXP (mem_src, 0)) == LO_SUM))
3321 return true;
3322 }
3323
3324 return false;
3325 }
3326
3327 /* Function to determine whether it is worth to do fp_as_gp optimization.
3328 Return 0: It is NOT worth to do fp_as_gp optimization.
3329 Return 1: It is APPROXIMATELY worth to do fp_as_gp optimization.
3330 Note that if it is worth to do fp_as_gp optimization,
3331 we MUST set FP_REGNUM ever live in this function. */
3332 int
3333 nds32_fp_as_gp_check_available (void)
3334 {
3335 /* If there exists ANY of following conditions,
3336 we DO NOT perform fp_as_gp optimization:
3337 1. TARGET_FORBID_FP_AS_GP is set
3338 regardless of the TARGET_FORCE_FP_AS_GP.
3339 2. User explicitly uses 'naked' attribute.
3340 3. Not optimize for size.
3341 4. Need frame pointer.
3342 5. If $fp is already required to be saved,
3343 it means $fp has already been chosen by the register allocator.
3344 Thus we had better not use it for fp_as_gp optimization.
3345 6. This function is a vararg function.
3346 DO NOT apply fp_as_gp optimization on this function
3347 because it may change and break stack frame.
3348 7. The epilogue is empty.
3349 This happens when the function uses exit()
3350 or its attribute is no_return.
3351 In that case, compiler will not expand epilogue
3352 so that we have no chance to output .omit_fp_end directive. */
3353 if (TARGET_FORBID_FP_AS_GP
3354 || lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
3355 || !optimize_size
3356 || frame_pointer_needed
3357 || NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
3358 || (cfun->stdarg == 1)
3359 || (find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == NULL))
3360 return 0;
3361
3362 /* Now we can check the possibility of using fp_as_gp optimization. */
3363 if (TARGET_FORCE_FP_AS_GP)
3364 {
3365 /* User explicitly issues -mforce-fp-as-gp option. */
3366 df_set_regs_ever_live (FP_REGNUM, 1);
3367 return 1;
3368 }
3369 else
3370 {
3371 /* In the following we are going to evaluate whether
3372 it is worth to do fp_as_gp optimization. */
3373 int good_gain = 0;
3374 int symbol_count = 0;
3375
3376 int threshold;
3377 rtx insn;
3378
3379 /* We check whether a prologue is already required.
3380 Note that $gp will be saved in the prologue for PIC code generation.
3381 After that, we can set the threshold by the existence of the prologue.
3382 Each fp-implied instruction saves 2 bytes of code size compared with
3383 the corresponding gp-aware instruction, so we have the following heuristics. */
3384 if (flag_pic
3385 || nds32_have_prologue_p ())
3386 {
3387 /* Have-prologue:
3388 Compiler already intends to generate prologue content,
3389 so the fp_as_gp optimization will only insert
3390 'la $fp,_FP_BASE_' instruction, which will be
3391 converted into 4-byte instruction at link time.
3392 The threshold is "3" symbol accesses, 2 + 2 + 2 > 4. */
3393 threshold = 3;
3394 }
3395 else
3396 {
3397 /* None-prologue:
3398 Compiler originally does not generate prologue content,
3399 so the fp_as_gp optimization will NOT ONLY insert
3400 'la $fp,_FP_BASE_' instruction, but also causes
3401 push/pop instructions.
3402 If we are using v3push (push25/pop25),
3403 the threshold is "5" symbol accesses, 5*2 > 4 + 2 + 2;
3404 If we are using normal push (smw/lmw),
3405 the threshold is "5+2" symbol accesses, 7*2 > 4 + 4 + 4. */
3406 threshold = 5 + (TARGET_V3PUSH ? 0 : 2);
3407 }
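/* Illustrative arithmetic only: a function that already needs a prologue
   and contains 3 symbol-addressed load/store insns saves 3 * 2 == 6 bytes
   by switching them to fp-implied forms, which outweighs the 4-byte
   'la $fp,_FP_BASE_', so good_gain is set. */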
3408
3409 /* We would like to traverse every instruction in this function.
3410 So we need to have push_topmost_sequence()/pop_topmost_sequence()
3411 surrounding our for-loop evaluation. */
3412 push_topmost_sequence ();
3413 /* Count the number of insns whose addressing mode uses a symbol. */
3414 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3415 {
3416 if (single_set (insn) && nds32_symbol_load_store_p (insn))
3417 symbol_count++;
3418
3419 if (symbol_count == threshold)
3420 {
3421 good_gain = 1;
3422 break;
3423 }
3424 }
3425 pop_topmost_sequence ();
3426
3427 /* Enable fp_as_gp optimization when potential gain is good enough. */
3428 if (good_gain)
3429 {
3430 df_set_regs_ever_live (FP_REGNUM, 1);
3431 return 1;
3432 }
3433 }
3434
3435 /* By default we return 0. */
3436 return 0;
3437 }
3438
3439
3440 /* Function to generate PC relative jump table.
3441 Refer to nds32.md for more details.
3442
3443 The following is the sample for the case that diff value
3444 can be presented in '.short' size.
3445
3446 addi $r1, $r1, -(case_lower_bound)
3447 slti $ta, $r1, (case_number)
3448 beqz $ta, .L_skip_label
3449
3450 la $ta, .L35 ! get jump table address
3451 lh $r1, [$ta + $r1 << 1] ! load symbol diff from jump table entry
3452 addi $ta, $r1, $ta
3453 jr5 $ta
3454
3455 ! jump table entry
3456 L35:
3457 .short .L25-.L35
3458 .short .L26-.L35
3459 .short .L27-.L35
3460 .short .L28-.L35
3461 .short .L29-.L35
3462 .short .L30-.L35
3463 .short .L31-.L35
3464 .short .L32-.L35
3465 .short .L33-.L35
3466 .short .L34-.L35 */
3467 const char *
3468 nds32_output_casesi_pc_relative (rtx *operands)
3469 {
3470 enum machine_mode mode;
3471 rtx diff_vec;
3472
3473 diff_vec = PATTERN (NEXT_INSN (operands[1]));
3474
3475 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
3476
3477 /* Step C: "t <-- operands[1]". */
3478 output_asm_insn ("la\t$ta, %l1", operands);
3479
3480 /* Get the mode of each element in the difference vector. */
3481 mode = GET_MODE (diff_vec);
3482
3483 /* Step D: "z <-- (mem (plus (operands[0] << m) t))",
3484 where m is 0, 1, or 2 to load address-diff value from table. */
3485 switch (mode)
3486 {
3487 case QImode:
3488 output_asm_insn ("lb\t%2, [$ta + %0 << 0]", operands);
3489 break;
3490 case HImode:
3491 output_asm_insn ("lh\t%2, [$ta + %0 << 1]", operands);
3492 break;
3493 case SImode:
3494 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
3495 break;
3496 default:
3497 gcc_unreachable ();
3498 }
3499
3500 /* Step E: "t <-- z + t".
3501 Add table label_ref with address-diff value to
3502 obtain target case address. */
3503 output_asm_insn ("add\t$ta, %2, $ta", operands);
3504
3505 /* Step F: jump to target with register t. */
3506 if (TARGET_16_BIT)
3507 return "jr5\t$ta";
3508 else
3509 return "jr\t$ta";
3510 }
3511
3512 /* Function to generate normal jump table. */
3513 const char *
3514 nds32_output_casesi (rtx *operands)
3515 {
3516 /* Step C: "t <-- operands[1]". */
3517 output_asm_insn ("la\t$ta, %l1", operands);
3518
3519 /* Step D: "z <-- (mem (plus (operands[0] << 2) t))". */
3520 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
3521
3522 /* No need to perform Step E, which is only used for
3523 the pc-relative jump table. */
3524
3525 /* Step F: jump to target with register z. */
3526 if (TARGET_16_BIT)
3527 return "jr5\t%2";
3528 else
3529 return "jr\t%2";
3530 }
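/* Likewise, a sketch of the absolute (non pc-relative) case handled
   above: 'la $ta, .L<table>' followed by 'lw %2, [$ta + %0 << 2]' and
   a final 'jr5 %2' (or 'jr %2' when !TARGET_16_BIT).  */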
3531
3532
3533 /* Function to return the 16-bit memory address format of operand OP. */
3534 enum nds32_16bit_address_type
3535 nds32_mem_format (rtx op)
3536 {
3537 enum machine_mode mode_test;
3538 int val;
3539 int regno;
3540
3541 if (!TARGET_16_BIT)
3542 return ADDRESS_NOT_16BIT_FORMAT;
3543
3544 mode_test = GET_MODE (op);
3545
3546 op = XEXP (op, 0);
3547
3548 /* 45 format. */
3549 if (GET_CODE (op) == REG && (mode_test == SImode))
3550 return ADDRESS_REG;
3551
3552 /* 333 format for QI/HImode. */
3553 if (GET_CODE (op) == REG && (REGNO (op) < R8_REGNUM))
3554 return ADDRESS_LO_REG_IMM3U;
3555
3556 /* post_inc 333 format. */
3557 if ((GET_CODE (op) == POST_INC) && (mode_test == SImode))
3558 {
3559 regno = REGNO (XEXP (op, 0));
3560
3561 if (regno < 8)
3562 return ADDRESS_POST_INC_LO_REG_IMM3U;
3563 }
3564
3565 /* post_modify 333 format. */
3566 if ((GET_CODE (op) == POST_MODIFY)
3567 && (mode_test == SImode)
3568 && (REG_P (XEXP (XEXP (op, 1), 0)))
3569 && (CONST_INT_P (XEXP (XEXP (op, 1), 1))))
3570 {
3571 regno = REGNO (XEXP (XEXP (op, 1), 0));
3572 val = INTVAL (XEXP (XEXP (op, 1), 1));
3573 if (regno < 8 && val < 32)
3574 return ADDRESS_POST_INC_LO_REG_IMM3U;
3575 }
3576
3577 if ((GET_CODE (op) == PLUS)
3578 && (GET_CODE (XEXP (op, 0)) == REG)
3579 && (GET_CODE (XEXP (op, 1)) == CONST_INT))
3580 {
3581 val = INTVAL (XEXP (op, 1));
3582
3583 regno = REGNO (XEXP (op, 0));
3584
3585 if (regno > 7
3586 && regno != SP_REGNUM
3587 && regno != FP_REGNUM)
3588 return ADDRESS_NOT_16BIT_FORMAT;
3589
3590 switch (mode_test)
3591 {
3592 case QImode:
3593 /* 333 format. */
3594 if (val >= 0 && val < 8 && regno < 8)
3595 return ADDRESS_LO_REG_IMM3U;
3596 break;
3597
3598 case HImode:
3599 /* 333 format. */
3600 if (val >= 0 && val < 16 && (val % 2 == 0) && regno < 8)
3601 return ADDRESS_LO_REG_IMM3U;
3602 break;
3603
3604 case SImode:
3605 case SFmode:
3606 case DFmode:
3607 /* $fp implies 37 format. */
3608 if ((regno == FP_REGNUM)
3609 && (val >= 0 && val < 512 && (val % 4 == 0)))
3610 return ADDRESS_FP_IMM7U;
3611 /* $sp implies 37 format. */
3612 else if ((regno == SP_REGNUM)
3613 && (val >= 0 && val < 512 && (val % 4 == 0)))
3614 return ADDRESS_SP_IMM7U;
3615 /* 333 format. */
3616 else if (val >= 0 && val < 32 && (val % 4 == 0) && regno < 8)
3617 return ADDRESS_LO_REG_IMM3U;
3618 break;
3619
3620 default:
3621 break;
3622 }
3623 }
3624
3625 return ADDRESS_NOT_16BIT_FORMAT;
3626 }
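/* Some illustrative mappings implied by the checks above, assuming
   TARGET_16_BIT (the register numbers are only examples):
     (mem:SI (reg $r5))                        -> ADDRESS_REG
     (mem:QI (plus (reg $r3) (const_int 4)))   -> ADDRESS_LO_REG_IMM3U
     (mem:SI (post_inc (reg $r2)))             -> ADDRESS_POST_INC_LO_REG_IMM3U
     (mem:SI (plus (reg $fp) (const_int 64)))  -> ADDRESS_FP_IMM7U
     (mem:SI (plus (reg $sp) (const_int 8)))   -> ADDRESS_SP_IMM7U
   Everything else falls back to ADDRESS_NOT_16BIT_FORMAT.  */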
3627
3628 /* Output 16-bit store. */
3629 const char *
3630 nds32_output_16bit_store (rtx *operands, int byte)
3631 {
3632 char pattern[100];
3633 unsigned char size;
3634 rtx code = XEXP (operands[0], 0);
3635
3636 size = nds32_byte_to_size (byte);
3637
3638 switch (nds32_mem_format (operands[0]))
3639 {
3640 case ADDRESS_REG:
3641 operands[0] = code;
3642 output_asm_insn ("swi450\t%1, [%0]", operands);
3643 break;
3644 case ADDRESS_LO_REG_IMM3U:
3645 snprintf (pattern, sizeof (pattern), "s%ci333\t%%1, %%0", size);
3646 output_asm_insn (pattern, operands);
3647 break;
3648 case ADDRESS_POST_INC_LO_REG_IMM3U:
3649 snprintf (pattern, sizeof (pattern), "s%ci333.bi\t%%1, %%0", size);
3650 output_asm_insn (pattern, operands);
3651 break;
3652 case ADDRESS_FP_IMM7U:
3653 output_asm_insn ("swi37\t%1, %0", operands);
3654 break;
3655 case ADDRESS_SP_IMM7U:
3656 /* Get the immediate value and set it back to operands[0]. */
3657 operands[0] = XEXP (code, 1);
3658 output_asm_insn ("swi37.sp\t%1, [ + (%0)]", operands);
3659 break;
3660 default:
3661 break;
3662 }
3663
3664 return "";
3665 }
3666
3667 /* Output 16-bit load. */
3668 const char *
3669 nds32_output_16bit_load (rtx *operands, int byte)
3670 {
3671 char pattern[100];
3672 unsigned char size;
3673 rtx code = XEXP (operands[1], 0);
3674
3675 size = nds32_byte_to_size (byte);
3676
3677 switch (nds32_mem_format (operands[1]))
3678 {
3679 case ADDRESS_REG:
3680 operands[1] = code;
3681 output_asm_insn ("lwi450\t%0, [%1]", operands);
3682 break;
3683 case ADDRESS_LO_REG_IMM3U:
3684 snprintf (pattern, sizeof (pattern), "l%ci333\t%%0, %%1", size);
3685 output_asm_insn (pattern, operands);
3686 break;
3687 case ADDRESS_POST_INC_LO_REG_IMM3U:
3688 snprintf (pattern, sizeof (pattern), "l%ci333.bi\t%%0, %%1", size);
3689 output_asm_insn (pattern, operands);
3690 break;
3691 case ADDRESS_FP_IMM7U:
3692 output_asm_insn ("lwi37\t%0, %1", operands);
3693 break;
3694 case ADDRESS_SP_IMM7U:
3695 /* Get the immediate value and set it back to operands[1]. */
3696 operands[1] = XEXP (code, 1);
3697 output_asm_insn ("lwi37.sp\t%0, [ + (%1)]", operands);
3698 break;
3699 default:
3700 break;
3701 }
3702
3703 return "";
3704 }
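/* A hedged example of the two routines above: with TARGET_16_BIT, a
   word store of $r1 through (mem:SI (plus (reg $sp) (const_int 8))) is
   classified as ADDRESS_SP_IMM7U and printed roughly as
   'swi37.sp $r1, [ + (8)]'; the matching load prints 'lwi37.sp'.
   The exact operand text comes from nds32_print_operand().  */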
3705
3706 /* Output 32-bit store. */
3707 const char *
3708 nds32_output_32bit_store (rtx *operands, int byte)
3709 {
3710 char pattern[100];
3711 unsigned char size;
3712 rtx code = XEXP (operands[0], 0);
3713
3714 size = nds32_byte_to_size (byte);
3715
3716 switch (GET_CODE (code))
3717 {
3718 case REG:
3719 /* (mem (reg X))
3720 => access location by using register,
3721 use "sbi / shi / swi" */
3722 snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
3723 break;
3724
3725 case SYMBOL_REF:
3726 case CONST:
3727 /* (mem (symbol_ref X))
3728 (mem (const (...)))
3729 => access global variables,
3730 use "sbi.gp / shi.gp / swi.gp" */
3731 operands[0] = XEXP (operands[0], 0);
3732 snprintf (pattern, sizeof (pattern), "s%ci.gp\t%%1, [ + %%0]", size);
3733 break;
3734
3735 case POST_INC:
3736 /* (mem (post_inc reg))
3737 => access location by using register which will be post increment,
3738 use "sbi.bi / shi.bi / swi.bi" */
3739 snprintf (pattern, sizeof (pattern),
3740 "s%ci.bi\t%%1, %%0, %d", size, byte);
3741 break;
3742
3743 case POST_DEC:
3744 /* (mem (post_dec reg))
3745 => access location by using register which will be post decrement,
3746 use "sbi.bi / shi.bi / swi.bi" */
3747 snprintf (pattern, sizeof (pattern),
3748 "s%ci.bi\t%%1, %%0, -%d", size, byte);
3749 break;
3750
3751 case POST_MODIFY:
3752 switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
3753 {
3754 case REG:
3755 case SUBREG:
3756 /* (mem (post_modify (reg) (plus (reg) (reg))))
3757 => access location by using register which will be
3758 post modified with reg,
3759 use "sb.bi/ sh.bi / sw.bi" */
3760 snprintf (pattern, sizeof (pattern), "s%c.bi\t%%1, %%0", size);
3761 break;
3762 case CONST_INT:
3763 /* (mem (post_modify (reg) (plus (reg) (const_int))))
3764 => access location by using register which will be
3765 post modified with const_int,
3766 use "sbi.bi/ shi.bi / swi.bi" */
3767 snprintf (pattern, sizeof (pattern), "s%ci.bi\t%%1, %%0", size);
3768 break;
3769 default:
3770 abort ();
3771 }
3772 break;
3773
3774 case PLUS:
3775 switch (GET_CODE (XEXP (code, 1)))
3776 {
3777 case REG:
3778 case SUBREG:
3779 /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
3780 => access location by adding two registers,
3781 use "sb / sh / sw" */
3782 snprintf (pattern, sizeof (pattern), "s%c\t%%1, %%0", size);
3783 break;
3784 case CONST_INT:
3785 /* (mem (plus reg const_int))
3786 => access location by adding one register with const_int,
3787 use "sbi / shi / swi" */
3788 snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
3789 break;
3790 default:
3791 abort ();
3792 }
3793 break;
3794
3795 case LO_SUM:
3796 operands[2] = XEXP (code, 1);
3797 operands[0] = XEXP (code, 0);
3798 snprintf (pattern, sizeof (pattern),
3799 "s%ci\t%%1, [%%0 + lo12(%%2)]", size);
3800 break;
3801
3802 default:
3803 abort ();
3804 }
3805
3806 output_asm_insn (pattern, operands);
3807 return "";
3808 }
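/* A hedged example of how the patterns above expand: a 4-byte store
   through (mem:SI (post_inc (reg))) takes the POST_INC case and,
   assuming nds32_byte_to_size() maps 4 bytes to 'w', produces the
   template "swi.bi\t%1, %0, 4", which output_asm_insn() then fills in
   with the actual operands.  */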
3809
3810 /* Output 32-bit load. */
3811 const char *
3812 nds32_output_32bit_load (rtx *operands, int byte)
3813 {
3814 char pattern[100];
3815 unsigned char size;
3816 rtx code;
3817
3818 code = XEXP (operands[1], 0);
3819
3820 size = nds32_byte_to_size (byte);
3821
3822 switch (GET_CODE (code))
3823 {
3824 case REG:
3825 /* (mem (reg X))
3826 => access location by using register,
3827 use "lbi / lhi / lwi" */
3828 snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
3829 break;
3830
3831 case SYMBOL_REF:
3832 case CONST:
3833 /* (mem (symbol_ref X))
3834 (mem (const (...)))
3835 => access global variables,
3836 use "lbi.gp / lhi.gp / lwi.gp" */
3837 operands[1] = XEXP (operands[1], 0);
3838 snprintf (pattern, sizeof (pattern), "l%ci.gp\t%%0, [ + %%1]", size);
3839 break;
3840
3841 case POST_INC:
3842 /* (mem (post_inc reg))
3843 => access location by using register which will be post increment,
3844 use "lbi.bi / lhi.bi / lwi.bi" */
3845 snprintf (pattern, sizeof (pattern),
3846 "l%ci.bi\t%%0, %%1, %d", size, byte);
3847 break;
3848
3849 case POST_DEC:
3850 /* (mem (post_dec reg))
3851 => access location by using register which will be post decrement,
3852 use "lbi.bi / lhi.bi / lwi.bi" */
3853 snprintf (pattern, sizeof (pattern),
3854 "l%ci.bi\t%%0, %%1, -%d", size, byte);
3855 break;
3856
3857 case POST_MODIFY:
3858 switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
3859 {
3860 case REG:
3861 case SUBREG:
3862 /* (mem (post_modify (reg) (plus (reg) (reg))))
3863 => access location by using register which will be
3864 post modified with reg,
3865 use "lb.bi/ lh.bi / lw.bi" */
3866 snprintf (pattern, sizeof (pattern), "l%c.bi\t%%0, %%1", size);
3867 break;
3868 case CONST_INT:
3869 /* (mem (post_modify (reg) (plus (reg) (const_int))))
3870 => access location by using register which will be
3871 post modified with const_int,
3872 use "lbi.bi/ lhi.bi / lwi.bi" */
3873 snprintf (pattern, sizeof (pattern), "l%ci.bi\t%%0, %%1", size);
3874 break;
3875 default:
3876 abort ();
3877 }
3878 break;
3879
3880 case PLUS:
3881 switch (GET_CODE (XEXP (code, 1)))
3882 {
3883 case REG:
3884 case SUBREG:
3885 /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
3886 use "lb / lh / lw" */
3887 snprintf (pattern, sizeof (pattern), "l%c\t%%0, %%1", size);
3888 break;
3889 case CONST_INT:
3890 /* (mem (plus reg const_int))
3891 => access location by adding one register with const_int,
3892 use "lbi / lhi / lwi" */
3893 snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
3894 break;
3895 default:
3896 abort ();
3897 }
3898 break;
3899
3900 case LO_SUM:
3901 operands[2] = XEXP (code, 1);
3902 operands[1] = XEXP (code, 0);
3903 snprintf (pattern, sizeof (pattern),
3904 "l%ci\t%%0, [%%1 + lo12(%%2)]", size);
3905 break;
3906
3907 default:
3908 abort ();
3909 }
3910
3911 output_asm_insn (pattern, operands);
3912 return "";
3913 }
3914
3915 /* Output 32-bit load with signed extension. */
3916 const char *
3917 nds32_output_32bit_load_s (rtx *operands, int byte)
3918 {
3919 char pattern[100];
3920 unsigned char size;
3921 rtx code;
3922
3923 code = XEXP (operands[1], 0);
3924
3925 size = nds32_byte_to_size (byte);
3926
3927 switch (GET_CODE (code))
3928 {
3929 case REG:
3930 /* (mem (reg X))
3931 => access location by using register,
3932 use "lbsi / lhsi" */
3933 snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
3934 break;
3935
3936 case SYMBOL_REF:
3937 case CONST:
3938 /* (mem (symbol_ref X))
3939 (mem (const (...)))
3940 => access global variables,
3941 use "lbsi.gp / lhsi.gp" */
3942 operands[1] = XEXP (operands[1], 0);
3943 snprintf (pattern, sizeof (pattern), "l%csi.gp\t%%0, [ + %%1]", size);
3944 break;
3945
3946 case POST_INC:
3947 /* (mem (post_inc reg))
3948 => access location by using register which will be post increment,
3949 use "lbsi.bi / lhsi.bi" */
3950 snprintf (pattern, sizeof (pattern),
3951 "l%csi.bi\t%%0, %%1, %d", size, byte);
3952 break;
3953
3954 case POST_DEC:
3955 /* (mem (post_dec reg))
3956 => access location by using register which will be post decrement,
3957 use "lbsi.bi / lhsi.bi" */
3958 snprintf (pattern, sizeof (pattern),
3959 "l%csi.bi\t%%0, %%1, -%d", size, byte);
3960 break;
3961
3962 case POST_MODIFY:
3963 switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
3964 {
3965 case REG:
3966 case SUBREG:
3967 /* (mem (post_modify (reg) (plus (reg) (reg))))
3968 => access location by using register which will be
3969 post modified with reg,
3970 use "lbs.bi/ lhs.bi" */
3971 snprintf (pattern, sizeof (pattern), "l%cs.bi\t%%0, %%1", size);
3972 break;
3973 case CONST_INT:
3974 /* (mem (post_modify (reg) (plus (reg) (const_int))))
3975 => access location by using register which will be
3976 post modified with const_int,
3977 use "lbsi.bi/ lhsi.bi" */
3978 snprintf (pattern, sizeof (pattern), "l%csi.bi\t%%0, %%1", size);
3979 break;
3980 default:
3981 abort ();
3982 }
3983 break;
3984
3985 case PLUS:
3986 switch (GET_CODE (XEXP (code, 1)))
3987 {
3988 case REG:
3989 case SUBREG:
3990 /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
3991 use "lbs / lhs" */
3992 snprintf (pattern, sizeof (pattern), "l%cs\t%%0, %%1", size);
3993 break;
3994 case CONST_INT:
3995 /* (mem (plus reg const_int))
3996 => access location by adding one register with const_int,
3997 use "lbsi / lhsi" */
3998 snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
3999 break;
4000 default:
4001 abort ();
4002 }
4003 break;
4004
4005 case LO_SUM:
4006 operands[2] = XEXP (code, 1);
4007 operands[1] = XEXP (code, 0);
4008 snprintf (pattern, sizeof (pattern),
4009 "l%csi\t%%0, [%%1 + lo12(%%2)]", size);
4010 break;
4011
4012 default:
4013 abort ();
4014 }
4015
4016 output_asm_insn (pattern, operands);
4017 return "";
4018 }
4019
4020 /* Function to output stack push operation.
4021 We need to deal with normal stack push multiple or stack v3push. */
4022 const char *
4023 nds32_output_stack_push (void)
4024 {
4025 /* A string pattern for output_asm_insn(). */
4026 char pattern[100];
4027 /* The operands array which will be used in output_asm_insn(). */
4028 rtx operands[3];
4029 /* Pick up callee-saved first regno and last regno for further use. */
4030 int rb_regno = cfun->machine->callee_saved_regs_first_regno;
4031 int re_regno = cfun->machine->callee_saved_regs_last_regno;
4032
4033 if (TARGET_V3PUSH)
4034 {
4035 /* For stack v3push:
4036 operands[0]: Re
4037 operands[1]: imm8u */
4038
4039 /* This variable is used to check whether 'push25 Re,imm8u' can be used. */
4040 int sp_adjust;
4041
4042 /* Set operands[0]. */
4043 operands[0] = gen_rtx_REG (SImode, re_regno);
4044
4045 /* Check if we can generate 'push25 Re,imm8u';
4046 otherwise, generate 'push25 Re,0'. */
4047 sp_adjust = cfun->machine->local_size
4048 + cfun->machine->out_args_size
4049 + cfun->machine->callee_saved_area_padding_bytes;
4050 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
4051 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
4052 operands[1] = GEN_INT (sp_adjust);
4053 else
4054 operands[1] = GEN_INT (0);
4055
4056 /* Create assembly code pattern. */
4057 snprintf (pattern, sizeof (pattern), "push25\t%%0, %%1");
4058 }
4059 else
4060 {
4061 /* For normal stack push multiple:
4062 operands[0]: Rb
4063 operands[1]: Re
4064 operands[2]: En4 */
4065
4066 /* This variable is used to check if we only need to generate the En4 field.
4067 If Rb == Re == SP_REGNUM, we set this variable to 1. */
4068 int push_en4_only_p = 0;
4069
4070 /* Set operands[0] and operands[1]. */
4071 operands[0] = gen_rtx_REG (SImode, rb_regno);
4072 operands[1] = gen_rtx_REG (SImode, re_regno);
4073
4074 /* 'smw.adm $sp,[$sp],$sp,0' means push nothing. */
4075 if (!cfun->machine->fp_size
4076 && !cfun->machine->gp_size
4077 && !cfun->machine->lp_size
4078 && REGNO (operands[0]) == SP_REGNUM
4079 && REGNO (operands[1]) == SP_REGNUM)
4080 {
4081 /* No need to generate instruction. */
4082 return "";
4083 }
4084 else
4085 {
4086 /* If Rb == Re == SP_REGNUM, we only need to generate the En4 field. */
4087 if (REGNO (operands[0]) == SP_REGNUM
4088 && REGNO (operands[1]) == SP_REGNUM)
4089 push_en4_only_p = 1;
4090
4091 /* Create assembly code pattern.
4092 We need to handle the form: "Rb, Re, { $fp $gp $lp }". */
4093 snprintf (pattern, sizeof (pattern),
4094 "push.s\t%s{%s%s%s }",
4095 push_en4_only_p ? "" : "%0, %1, ",
4096 cfun->machine->fp_size ? " $fp" : "",
4097 cfun->machine->gp_size ? " $gp" : "",
4098 cfun->machine->lp_size ? " $lp" : "");
4099 }
4100 }
4101
4102 /* We use output_asm_insn() to output the assembly code ourselves. */
4103 output_asm_insn (pattern, operands);
4104 return "";
4105 }
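/* A worked example for the v3push path above (the numbers are
   hypothetical): with local_size == 16, out_args_size == 8 and no
   callee-saved padding, sp_adjust is 24, which satisfies the Iu08
   constraint and is double-word aligned, so 'push25 %0, 24' both saves
   the registers and allocates those 24 bytes at once; otherwise
   'push25 %0, 0' is used and the remaining stack adjustment is
   presumably emitted separately by the prologue expansion.  */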
4106
4107 /* Function to output stack pop operation.
4108 We need to deal with normal stack pop multiple or stack v3pop. */
4109 const char *
4110 nds32_output_stack_pop (void)
4111 {
4112 /* A string pattern for output_asm_insn(). */
4113 char pattern[100];
4114 /* The operands array which will be used in output_asm_insn(). */
4115 rtx operands[3];
4116 /* Pick up callee-saved first regno and last regno for further use. */
4117 int rb_regno = cfun->machine->callee_saved_regs_first_regno;
4118 int re_regno = cfun->machine->callee_saved_regs_last_regno;
4119
4120 if (TARGET_V3PUSH)
4121 {
4122 /* For stack v3pop:
4123 operands[0]: Re
4124 operands[1]: imm8u */
4125
4126 /* This variable is used to check whether 'pop25 Re,imm8u' can be used. */
4127 int sp_adjust;
4128
4129 /* Set operands[0]. */
4130 operands[0] = gen_rtx_REG (SImode, re_regno);
4131
4132 /* Check if we can generate 'pop25 Re,imm8u';
4133 otherwise, generate 'pop25 Re,0'.
4134 We have to consider the alloca issue as well.
4135 If the function does call alloca(), the stack pointer is not fixed.
4136 In that case, we cannot use 'pop25 Re,imm8u' directly.
4137 We have to calculate the stack pointer from the frame pointer
4138 and then use 'pop25 Re,0'. */
4139 sp_adjust = cfun->machine->local_size
4140 + cfun->machine->out_args_size
4141 + cfun->machine->callee_saved_area_padding_bytes;
4142 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
4143 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
4144 && !cfun->calls_alloca)
4145 operands[1] = GEN_INT (sp_adjust);
4146 else
4147 operands[1] = GEN_INT (0);
4148
4149 /* Create assembly code pattern. */
4150 snprintf (pattern, sizeof (pattern), "pop25\t%%0, %%1");
4151 }
4152 else
4153 {
4154 /* For normal stack pop multiple:
4155 operands[0]: Rb
4156 operands[1]: Re
4157 operands[2]: En4 */
4158
4159 /* This variable is used to check if we only need to generate the En4 field.
4160 If Rb == Re == SP_REGNUM, we set this variable to 1. */
4161 int pop_en4_only_p = 0;
4162
4163 /* Set operands[0] and operands[1]. */
4164 operands[0] = gen_rtx_REG (SImode, rb_regno);
4165 operands[1] = gen_rtx_REG (SImode, re_regno);
4166
4167 /* 'lmw.bim $sp,[$sp],$sp,0' means pop nothing. */
4168 if (!cfun->machine->fp_size
4169 && !cfun->machine->gp_size
4170 && !cfun->machine->lp_size
4171 && REGNO (operands[0]) == SP_REGNUM
4172 && REGNO (operands[1]) == SP_REGNUM)
4173 {
4174 /* No need to generate instruction. */
4175 return "";
4176 }
4177 else
4178 {
4179 /* If Rb == Re == SP_REGNUM, we only need to generate the En4 field. */
4180 if (REGNO (operands[0]) == SP_REGNUM
4181 && REGNO (operands[1]) == SP_REGNUM)
4182 pop_en4_only_p = 1;
4183
4184 /* Create assembly code pattern.
4185 We need to handle the form: "Rb, Re, { $fp $gp $lp }". */
4186 snprintf (pattern, sizeof (pattern),
4187 "pop.s\t%s{%s%s%s }",
4188 pop_en4_only_p ? "" : "%0, %1, ",
4189 cfun->machine->fp_size ? " $fp" : "",
4190 cfun->machine->gp_size ? " $gp" : "",
4191 cfun->machine->lp_size ? " $lp" : "");
4192 }
4193 }
4194
4195 /* We use output_asm_insn() to output the assembly code ourselves. */
4196 output_asm_insn (pattern, operands);
4197 return "";
4198 }
4199
4200 /* Return alignment 2 (log base 2) if the instruction following LABEL is a 4-byte instruction. */
4201 int
4202 nds32_target_alignment (rtx label)
4203 {
4204 rtx insn;
4205
4206 if (optimize_size)
4207 return 0;
4208
4209 insn = next_active_insn (label);
4210
4211 if (insn == 0)
4212 return 0;
4213 else if ((get_attr_length (insn) % 4) == 0)
4214 return 2;
4215 else
4216 return 0;
4217 }
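/* Illustration of the value returned above: 2 is a log-base-2
   alignment, i.e. the label is given 4-byte alignment (e.g. via an
   '.align 2' directive) when the length of the next active insn is a
   multiple of 4 bytes; other followers and -Os compilations keep the
   default alignment of 0.  */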
4218
4219 /* ------------------------------------------------------------------------ */
4220
4221 /* PART 5: Initialize target hook structure and definitions. */
4222 \f
4223 /* Controlling the Compilation Driver. */
4224
4225 \f
4226 /* Run-time Target Specification. */
4227
4228 \f
4229 /* Defining Data Structures for Per-function Information. */
4230
4231 \f
4232 /* Storage Layout. */
4233
4234 #undef TARGET_PROMOTE_FUNCTION_MODE
4235 #define TARGET_PROMOTE_FUNCTION_MODE \
4236 default_promote_function_mode_always_promote
4237
4238 \f
4239 /* Layout of Source Language Data Types. */
4240
4241 \f
4242 /* Register Usage. */
4243
4244 /* -- Basic Characteristics of Registers. */
4245
4246 /* -- Order of Allocation of Registers. */
4247
4248 /* -- How Values Fit in Registers. */
4249
4250 /* -- Handling Leaf Functions. */
4251
4252 /* -- Registers That Form a Stack. */
4253
4254 \f
4255 /* Register Classes. */
4256
4257 #undef TARGET_CLASS_MAX_NREGS
4258 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
4259
4260 #undef TARGET_LRA_P
4261 #define TARGET_LRA_P hook_bool_void_true
4262
4263 #undef TARGET_REGISTER_PRIORITY
4264 #define TARGET_REGISTER_PRIORITY nds32_register_priority
4265
4266 \f
4267 /* Obsolete Macros for Defining Constraints. */
4268
4269 \f
4270 /* Stack Layout and Calling Conventions. */
4271
4272 /* -- Basic Stack Layout. */
4273
4274 /* -- Exception Handling Support. */
4275
4276 /* -- Specifying How Stack Checking is Done. */
4277
4278 /* -- Registers That Address the Stack Frame. */
4279
4280 /* -- Eliminating Frame Pointer and Arg Pointer. */
4281
4282 #undef TARGET_CAN_ELIMINATE
4283 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
4284
4285 /* -- Passing Function Arguments on the Stack. */
4286
4287 /* -- Passing Arguments in Registers. */
4288
4289 #undef TARGET_FUNCTION_ARG
4290 #define TARGET_FUNCTION_ARG nds32_function_arg
4291
4292 #undef TARGET_FUNCTION_ARG_ADVANCE
4293 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
4294
4295 #undef TARGET_FUNCTION_ARG_BOUNDARY
4296 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
4297
4298 /* -- How Scalar Function Values Are Returned. */
4299
4300 #undef TARGET_FUNCTION_VALUE
4301 #define TARGET_FUNCTION_VALUE nds32_function_value
4302
4303 #undef TARGET_LIBCALL_VALUE
4304 #define TARGET_LIBCALL_VALUE nds32_libcall_value
4305
4306 #undef TARGET_FUNCTION_VALUE_REGNO_P
4307 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
4308
4309 /* -- How Large Values Are Returned. */
4310
4311 /* -- Caller-Saves Register Allocation. */
4312
4313 /* -- Function Entry and Exit. */
4314
4315 #undef TARGET_ASM_FUNCTION_PROLOGUE
4316 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
4317
4318 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
4319 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
4320
4321 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
4322 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
4323
4324 #undef TARGET_ASM_FUNCTION_EPILOGUE
4325 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
4326
4327 #undef TARGET_ASM_OUTPUT_MI_THUNK
4328 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
4329
4330 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
4331 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
4332
4333 /* -- Generating Code for Profiling. */
4334
4335 /* -- Permitting tail calls. */
4336
4337 #undef TARGET_WARN_FUNC_RETURN
4338 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
4339
4340 /* Stack smashing protection. */
4341
4342 \f
4343 /* Implementing the Varargs Macros. */
4344
4345 #undef TARGET_STRICT_ARGUMENT_NAMING
4346 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
4347
4348 \f
4349 /* Trampolines for Nested Functions. */
4350
4351 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
4352 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
4353
4354 #undef TARGET_TRAMPOLINE_INIT
4355 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
4356
4357 \f
4358 /* Implicit Calls to Library Routines. */
4359
4360 \f
4361 /* Addressing Modes. */
4362
4363 #undef TARGET_LEGITIMATE_ADDRESS_P
4364 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
4365
4366 \f
4367 /* Anchored Addresses. */
4368
4369 \f
4370 /* Condition Code Status. */
4371
4372 /* -- Representation of condition codes using (cc0). */
4373
4374 /* -- Representation of condition codes using registers. */
4375
4376 /* -- Macros to control conditional execution. */
4377
4378 \f
4379 /* Describing Relative Costs of Operations. */
4380
4381 #undef TARGET_REGISTER_MOVE_COST
4382 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
4383
4384 #undef TARGET_MEMORY_MOVE_COST
4385 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
4386
4387 #undef TARGET_RTX_COSTS
4388 #define TARGET_RTX_COSTS nds32_rtx_costs
4389
4390 #undef TARGET_ADDRESS_COST
4391 #define TARGET_ADDRESS_COST nds32_address_cost
4392
4393 \f
4394 /* Adjusting the Instruction Scheduler. */
4395
4396 \f
4397 /* Dividing the Output into Sections (Texts, Data, . . . ). */
4398
4399 \f
4400 /* Position Independent Code. */
4401
4402 \f
4403 /* Defining the Output Assembler Language. */
4404
4405 /* -- The Overall Framework of an Assembler File. */
4406
4407 #undef TARGET_ASM_FILE_START
4408 #define TARGET_ASM_FILE_START nds32_asm_file_start
4409 #undef TARGET_ASM_FILE_END
4410 #define TARGET_ASM_FILE_END nds32_asm_file_end
4411
4412 /* -- Output of Data. */
4413
4414 #undef TARGET_ASM_ALIGNED_HI_OP
4415 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
4416
4417 #undef TARGET_ASM_ALIGNED_SI_OP
4418 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
4419
4420 /* -- Output of Uninitialized Variables. */
4421
4422 /* -- Output and Generation of Labels. */
4423
4424 #undef TARGET_ASM_GLOBALIZE_LABEL
4425 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
4426
4427 /* -- How Initialization Functions Are Handled. */
4428
4429 /* -- Macros Controlling Initialization Routines. */
4430
4431 /* -- Output of Assembler Instructions. */
4432
4433 #undef TARGET_PRINT_OPERAND
4434 #define TARGET_PRINT_OPERAND nds32_print_operand
4435 #undef TARGET_PRINT_OPERAND_ADDRESS
4436 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
4437
4438 /* -- Output of Dispatch Tables. */
4439
4440 /* -- Assembler Commands for Exception Regions. */
4441
4442 /* -- Assembler Commands for Alignment. */
4443
4444 \f
4445 /* Controlling Debugging Information Format. */
4446
4447 /* -- Macros Affecting All Debugging Formats. */
4448
4449 /* -- Specific Options for DBX Output. */
4450
4451 /* -- Open-Ended Hooks for DBX Format. */
4452
4453 /* -- File Names in DBX Format. */
4454
4455 /* -- Macros for SDB and DWARF Output. */
4456
4457 /* -- Macros for VMS Debug Format. */
4458
4459 \f
4460 /* Cross Compilation and Floating Point. */
4461
4462 \f
4463 /* Mode Switching Instructions. */
4464
4465 \f
4466 /* Defining target-specific uses of __attribute__. */
4467
4468 #undef TARGET_ATTRIBUTE_TABLE
4469 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
4470
4471 #undef TARGET_MERGE_DECL_ATTRIBUTES
4472 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
4473
4474 #undef TARGET_INSERT_ATTRIBUTES
4475 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
4476
4477 #undef TARGET_OPTION_PRAGMA_PARSE
4478 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
4479
4480 #undef TARGET_OPTION_OVERRIDE
4481 #define TARGET_OPTION_OVERRIDE nds32_option_override
4482
4483 \f
4484 /* Emulating TLS. */
4485
4486 \f
4487 /* Defining coprocessor specifics for MIPS targets. */
4488
4489 \f
4490 /* Parameters for Precompiled Header Validity Checking. */
4491
4492 \f
4493 /* C++ ABI parameters. */
4494
4495 \f
4496 /* Adding support for named address spaces. */
4497
4498 \f
4499 /* Miscellaneous Parameters. */
4500
4501 #undef TARGET_INIT_BUILTINS
4502 #define TARGET_INIT_BUILTINS nds32_init_builtins
4503
4504 #undef TARGET_EXPAND_BUILTIN
4505 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
4506
4507 \f
4508 /* ------------------------------------------------------------------------ */
4509
4510 /* Initialize the GCC target structure. */
4511
4512 struct gcc_target targetm = TARGET_INITIALIZER;
4513
4514 /* ------------------------------------------------------------------------ */