/* Source: gcc/config/nds32/nds32.c (mirrored from thirdparty/gcc.git via
   git.ipfire.org gitweb; snapshot at commit "Wrap option names in gcc
   internal messages with %< and %>").  */
1 /* Subroutines used for code generation of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2019 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* ------------------------------------------------------------------------ */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tree-pass.h"
33 #include "stringpool.h"
34 #include "attribs.h"
35 #include "df.h"
36 #include "memmodel.h"
37 #include "tm_p.h"
38 #include "optabs.h" /* For GEN_FCN. */
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "diagnostic-core.h"
43 #include "stor-layout.h"
44 #include "varasm.h"
45 #include "calls.h"
46 #include "output.h"
47 #include "explow.h"
48 #include "expr.h"
49 #include "tm-constrs.h"
50 #include "builtins.h"
51 #include "cpplib.h"
52 #include "context.h"
53
54 /* This file should be included last. */
55 #include "target-def.h"
56
57 /* ------------------------------------------------------------------------ */
58
59 /* This file is divided into five parts:
60
61 PART 1: Auxiliary static variable definitions and
62 target hook static variable definitions.
63
64 PART 2: Auxiliary static function definitions.
65
66 PART 3: Implement target hook stuff definitions.
67
   68     PART 4: Implement extern function definitions,
69 the prototype is in nds32-protos.h.
70
71 PART 5: Initialize target hook structure and definitions. */
72
73 /* ------------------------------------------------------------------------ */
74
75 /* PART 1: Auxiliary static variable definitions and
76 target hook static variable definitions. */
77
78 /* Define intrinsic register names.
79 Please refer to nds32_intrinsic.h file, the index is corresponding to
80 'enum nds32_intrinsic_registers' data type values.
81 NOTE that the base value starting from 1024. */
82 static const char * const nds32_intrinsic_register_names[] =
83 {
84 "$CPU_VER",
85 "$ICM_CFG",
86 "$DCM_CFG",
87 "$MMU_CFG",
88 "$MSC_CFG",
89 "$MSC_CFG2",
90 "$CORE_ID",
91 "$FUCOP_EXIST",
92
93 "$PSW",
94 "$IPSW",
95 "$P_IPSW",
96 "$IVB",
97 "$EVA",
98 "$P_EVA",
99 "$ITYPE",
100 "$P_ITYPE",
101
102 "$MERR",
103 "$IPC",
104 "$P_IPC",
105 "$OIPC",
106 "$P_P0",
107 "$P_P1",
108
109 "$INT_MASK",
110 "$INT_MASK2",
111 "$INT_MASK3",
112 "$INT_PEND",
113 "$INT_PEND2",
114 "$INT_PEND3",
115 "$SP_USR",
116 "$SP_PRIV",
117 "$INT_PRI",
118 "$INT_PRI2",
119 "$INT_PRI3",
120 "$INT_PRI4",
121 "$INT_CTRL",
122 "$INT_TRIGGER",
123 "$INT_TRIGGER2",
124 "$INT_GPR_PUSH_DIS",
125
126 "$MMU_CTL",
127 "$L1_PPTB",
128 "$TLB_VPN",
129 "$TLB_DATA",
130 "$TLB_MISC",
131 "$VLPT_IDX",
132 "$ILMB",
133 "$DLMB",
134
135 "$CACHE_CTL",
136 "$HSMP_SADDR",
137 "$HSMP_EADDR",
138 "$SDZ_CTL",
139 "$N12MISC_CTL",
140 "$MISC_CTL",
141 "$ECC_MISC",
142
143 "$BPC0",
144 "$BPC1",
145 "$BPC2",
146 "$BPC3",
147 "$BPC4",
148 "$BPC5",
149 "$BPC6",
150 "$BPC7",
151
152 "$BPA0",
153 "$BPA1",
154 "$BPA2",
155 "$BPA3",
156 "$BPA4",
157 "$BPA5",
158 "$BPA6",
159 "$BPA7",
160
161 "$BPAM0",
162 "$BPAM1",
163 "$BPAM2",
164 "$BPAM3",
165 "$BPAM4",
166 "$BPAM5",
167 "$BPAM6",
168 "$BPAM7",
169
170 "$BPV0",
171 "$BPV1",
172 "$BPV2",
173 "$BPV3",
174 "$BPV4",
175 "$BPV5",
176 "$BPV6",
177 "$BPV7",
178
179 "$BPCID0",
180 "$BPCID1",
181 "$BPCID2",
182 "$BPCID3",
183 "$BPCID4",
184 "$BPCID5",
185 "$BPCID6",
186 "$BPCID7",
187
188 "$EDM_CFG",
189 "$EDMSW",
190 "$EDM_CTL",
191 "$EDM_DTR",
192 "$BPMTC",
193 "$DIMBR",
194
195 "$TECR0",
196 "$TECR1",
197 "$PFMC0",
198 "$PFMC1",
199 "$PFMC2",
200 "$PFM_CTL",
201 "$PFT_CTL",
202 "$HSP_CTL",
203 "$SP_BOUND",
204 "$SP_BOUND_PRIV",
205 "$SP_BASE",
206 "$SP_BASE_PRIV",
207 "$FUCOP_CTL",
208 "$PRUSR_ACC_CTL",
209
210 "$DMA_CFG",
211 "$DMA_GCSW",
212 "$DMA_CHNSEL",
213 "$DMA_ACT",
214 "$DMA_SETUP",
215 "$DMA_ISADDR",
216 "$DMA_ESADDR",
217 "$DMA_TCNT",
218 "$DMA_STATUS",
219 "$DMA_2DSET",
220 "$DMA_2DSCTL",
221 "$DMA_RCNT",
222 "$DMA_HSTATUS",
223
224 "$PC",
225 "$SP_USR1",
226 "$SP_USR2",
227 "$SP_USR3",
228 "$SP_PRIV1",
229 "$SP_PRIV2",
230 "$SP_PRIV3",
231 "$BG_REGION",
232 "$SFCR",
233 "$SIGN",
234 "$ISIGN",
235 "$P_ISIGN",
236 "$IFC_LP",
237 "$ITB"
238 };
239
240 /* Define instrinsic cctl names. */
241 static const char * const nds32_cctl_names[] =
242 {
243 "L1D_VA_FILLCK",
244 "L1D_VA_ULCK",
245 "L1I_VA_FILLCK",
246 "L1I_VA_ULCK",
247
248 "L1D_IX_WBINVAL",
249 "L1D_IX_INVAL",
250 "L1D_IX_WB",
251 "L1I_IX_INVAL",
252
253 "L1D_VA_INVAL",
254 "L1D_VA_WB",
255 "L1D_VA_WBINVAL",
256 "L1I_VA_INVAL",
257
258 "L1D_IX_RTAG",
259 "L1D_IX_RWD",
260 "L1I_IX_RTAG",
261 "L1I_IX_RWD",
262
263 "L1D_IX_WTAG",
264 "L1D_IX_WWD",
265 "L1I_IX_WTAG",
266 "L1I_IX_WWD"
267 };
268
269 static const char * const nds32_dpref_names[] =
270 {
271 "SRD",
272 "MRD",
273 "SWR",
274 "MWR",
275 "PTE",
276 "CLWR"
277 };
278
279 /* Defining register allocation order for performance.
280 We want to allocate callee-saved registers after others.
281 It may be used by nds32_adjust_reg_alloc_order(). */
282 static const int nds32_reg_alloc_order_for_speed[] =
283 {
284 0, 1, 2, 3, 4, 5, 16, 17,
285 18, 19, 20, 21, 22, 23, 24, 25,
286 26, 27, 6, 7, 8, 9, 10, 11,
287 12, 13, 14, 15
288 };
289
290 /* Defining target-specific uses of __attribute__. */
291 static const struct attribute_spec nds32_attribute_table[] =
292 {
293 /* Syntax: { name, min_len, max_len, decl_required, type_required,
294 function_type_required, affects_type_identity, handler,
295 exclude } */
296
297 /* The interrupt vid: [0-63]+ (actual vector number starts from 9 to 72). */
298 { "interrupt", 1, 64, false, false, false, false, NULL, NULL },
299 /* The exception vid: [1-8]+ (actual vector number starts from 1 to 8). */
300 { "exception", 1, 8, false, false, false, false, NULL, NULL },
301 /* Argument is user's interrupt numbers. The vector number is always 0. */
302 { "reset", 1, 1, false, false, false, false, NULL, NULL },
303
304 /* The attributes describing isr nested type. */
305 { "nested", 0, 0, false, false, false, false, NULL, NULL },
306 { "not_nested", 0, 0, false, false, false, false, NULL, NULL },
307 { "nested_ready", 0, 0, false, false, false, false, NULL, NULL },
308 { "critical", 0, 0, false, false, false, false, NULL, NULL },
309
310 /* The attributes describing isr register save scheme. */
311 { "save_all", 0, 0, false, false, false, false, NULL, NULL },
312 { "partial_save", 0, 0, false, false, false, false, NULL, NULL },
313
314 /* The attributes used by reset attribute. */
315 { "nmi", 1, 1, false, false, false, false, NULL, NULL },
316 { "warm", 1, 1, false, false, false, false, NULL, NULL },
317
318 /* The attributes describing isr security level. */
319 { "secure", 1, 1, false, false, false, false, NULL, NULL },
320
321 /* The attribute telling no prologue/epilogue. */
322 { "naked", 0, 0, false, false, false, false, NULL, NULL },
323
324 /* The attribute is used to tell this function to be ROM patch. */
325 { "indirect_call",0, 0, false, false, false, false, NULL, NULL },
326
327 /* FOR BACKWARD COMPATIBILITY,
328 this attribute also tells no prologue/epilogue. */
329 { "no_prologue", 0, 0, false, false, false, false, NULL, NULL },
330
331 /* The last attribute spec is set to be NULL. */
332 { NULL, 0, 0, false, false, false, false, NULL, NULL }
333 };
334
335
336 /* ------------------------------------------------------------------------ */
337
338 /* PART 2: Auxiliary static function definitions. */
339
340 /* Function to save and restore machine-specific function data. */
341 static struct machine_function *
342 nds32_init_machine_status (void)
343 {
344 struct machine_function *machine;
345 machine = ggc_cleared_alloc<machine_function> ();
346
347 /* Initially assume this function does not use __builtin_eh_return. */
348 machine->use_eh_return_p = 0;
349
350 /* Initially assume this function needs prologue/epilogue. */
351 machine->naked_p = 0;
352
353 /* Initially assume this function does NOT use fp_as_gp optimization. */
354 machine->fp_as_gp_p = 0;
355
356 /* Initially this function is not under strictly aligned situation. */
357 machine->strict_aligned_p = 0;
358
359 /* Initially this function has no naked and no_prologue attributes. */
360 machine->attr_naked_p = 0;
361 machine->attr_no_prologue_p = 0;
362
363 return machine;
364 }
365
366 /* Function to compute stack frame size and
367 store into cfun->machine structure. */
368 static void
369 nds32_compute_stack_frame (void)
370 {
371 int r;
372 int block_size;
373 bool v3pushpop_p;
374
375 /* Because nds32_compute_stack_frame() will be called from different place,
376 everytime we enter this function, we have to assume this function
377 needs prologue/epilogue. */
378 cfun->machine->naked_p = 0;
379
380 /* We need to mark whether this function has naked and no_prologue
381 attribute so that we can distinguish the difference if users applies
382 -mret-in-naked-func option. */
383 cfun->machine->attr_naked_p
384 = lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
385 ? 1 : 0;
386 cfun->machine->attr_no_prologue_p
387 = lookup_attribute ("no_prologue", DECL_ATTRIBUTES (current_function_decl))
388 ? 1 : 0;
389
390 /* If __builtin_eh_return is used, we better have frame pointer needed
391 so that we can easily locate the stack slot of return address. */
392 if (crtl->calls_eh_return)
393 {
394 frame_pointer_needed = 1;
395
396 /* We need to mark eh data registers that need to be saved
397 in the stack. */
398 cfun->machine->eh_return_data_first_regno = EH_RETURN_DATA_REGNO (0);
399 for (r = 0; EH_RETURN_DATA_REGNO (r) != INVALID_REGNUM; r++)
400 cfun->machine->eh_return_data_last_regno = r;
401
402 cfun->machine->eh_return_data_regs_size
403 = 4 * (cfun->machine->eh_return_data_last_regno
404 - cfun->machine->eh_return_data_first_regno
405 + 1);
406 cfun->machine->use_eh_return_p = 1;
407 }
408 else
409 {
410 /* Assigning SP_REGNUM to eh_first_regno and eh_last_regno means we
411 do not need to handle __builtin_eh_return case in this function. */
412 cfun->machine->eh_return_data_first_regno = SP_REGNUM;
413 cfun->machine->eh_return_data_last_regno = SP_REGNUM;
414
415 cfun->machine->eh_return_data_regs_size = 0;
416 cfun->machine->use_eh_return_p = 0;
417 }
418
419 /* Get variadic arguments size to prepare pretend arguments and
420 we will push them into stack at prologue by ourself. */
421 cfun->machine->va_args_size = crtl->args.pretend_args_size;
422 if (cfun->machine->va_args_size != 0)
423 {
424 cfun->machine->va_args_first_regno
425 = NDS32_GPR_ARG_FIRST_REGNUM
426 + NDS32_MAX_GPR_REGS_FOR_ARGS
427 - (crtl->args.pretend_args_size / UNITS_PER_WORD);
428 cfun->machine->va_args_last_regno
429 = NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS - 1;
430 }
431 else
432 {
433 cfun->machine->va_args_first_regno = SP_REGNUM;
434 cfun->machine->va_args_last_regno = SP_REGNUM;
435 }
436
437 /* Important: We need to make sure that varargs area is 8-byte alignment. */
438 block_size = cfun->machine->va_args_size;
439 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
440 {
441 cfun->machine->va_args_area_padding_bytes
442 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
443 }
444
445 /* Get local variables, incoming variables, and temporary variables size.
446 Note that we need to make sure it is 8-byte alignment because
447 there may be no padding bytes if we are using LRA. */
448 cfun->machine->local_size = NDS32_ROUND_UP_DOUBLE_WORD (get_frame_size ());
449
450 /* Get outgoing arguments size. */
451 cfun->machine->out_args_size = crtl->outgoing_args_size;
452
453 /* If $fp value is required to be saved on stack, it needs 4 bytes space.
454 Check whether $fp is ever live. */
455 cfun->machine->fp_size = (df_regs_ever_live_p (FP_REGNUM)) ? 4 : 0;
456
457 /* If $gp value is required to be saved on stack, it needs 4 bytes space.
458 Check whether we are using PIC code genration. */
459 cfun->machine->gp_size =
460 (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)) ? 4 : 0;
461
462 /* If $lp value is required to be saved on stack, it needs 4 bytes space.
463 Check whether $lp is ever live. */
464 cfun->machine->lp_size
465 = (flag_always_save_lp || df_regs_ever_live_p (LP_REGNUM)) ? 4 : 0;
466
467 /* Initially there is no padding bytes. */
468 cfun->machine->callee_saved_area_gpr_padding_bytes = 0;
469
470 /* Calculate the bytes of saving callee-saved registers on stack. */
471 cfun->machine->callee_saved_gpr_regs_size = 0;
472 cfun->machine->callee_saved_first_gpr_regno = SP_REGNUM;
473 cfun->machine->callee_saved_last_gpr_regno = SP_REGNUM;
474 cfun->machine->callee_saved_fpr_regs_size = 0;
475 cfun->machine->callee_saved_first_fpr_regno = SP_REGNUM;
476 cfun->machine->callee_saved_last_fpr_regno = SP_REGNUM;
477
478 /* Currently, there is no need to check $r28~$r31
479 because we will save them in another way. */
480 for (r = 0; r < 28; r++)
481 {
482 if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
483 {
484 /* Mark the first required callee-saved register
485 (only need to set it once).
486 If first regno == SP_REGNUM, we can tell that
487 it is the first time to be here. */
488 if (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM)
489 cfun->machine->callee_saved_first_gpr_regno = r;
490 /* Mark the last required callee-saved register. */
491 cfun->machine->callee_saved_last_gpr_regno = r;
492 }
493 }
494
495 /* Recording fpu callee-saved register. */
496 if (TARGET_HARD_FLOAT)
497 {
498 for (r = NDS32_FIRST_FPR_REGNUM; r < NDS32_LAST_FPR_REGNUM; r++)
499 {
500 if (NDS32_REQUIRED_CALLEE_SAVED_P (r))
501 {
502 /* Mark the first required callee-saved register. */
503 if (cfun->machine->callee_saved_first_fpr_regno == SP_REGNUM)
504 {
505 /* Make first callee-saved number is even,
506 bacause we use doubleword access, and this way
507 promise 8-byte alignemt. */
508 if (!NDS32_FPR_REGNO_OK_FOR_DOUBLE (r))
509 cfun->machine->callee_saved_first_fpr_regno = r - 1;
510 else
511 cfun->machine->callee_saved_first_fpr_regno = r;
512 }
513 cfun->machine->callee_saved_last_fpr_regno = r;
514 }
515 }
516
517 /* Make last callee-saved register number is odd,
518 we hope callee-saved register is even. */
519 int last_fpr = cfun->machine->callee_saved_last_fpr_regno;
520 if (NDS32_FPR_REGNO_OK_FOR_DOUBLE (last_fpr))
521 cfun->machine->callee_saved_last_fpr_regno++;
522 }
523
524 /* Check if this function can omit prologue/epilogue code fragment.
525 If there is 'no_prologue'/'naked'/'secure' attribute in this function,
526 we can set 'naked_p' flag to indicate that
527 we do not have to generate prologue/epilogue.
528 Or, if all the following conditions succeed,
529 we can set this function 'naked_p' as well:
530 condition 1: first_regno == last_regno == SP_REGNUM,
531 which means we do not have to save
532 any callee-saved registers.
533 condition 2: Both $lp and $fp are NOT live in this function,
534 which means we do not need to save them and there
535 is no outgoing size.
536 condition 3: There is no local_size, which means
537 we do not need to adjust $sp. */
538 if (lookup_attribute ("no_prologue", DECL_ATTRIBUTES (current_function_decl))
539 || lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl))
540 || lookup_attribute ("secure", DECL_ATTRIBUTES (current_function_decl))
541 || (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM
542 && cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM
543 && cfun->machine->callee_saved_first_fpr_regno == SP_REGNUM
544 && cfun->machine->callee_saved_last_fpr_regno == SP_REGNUM
545 && !df_regs_ever_live_p (FP_REGNUM)
546 && !df_regs_ever_live_p (LP_REGNUM)
547 && cfun->machine->local_size == 0
548 && !flag_pic))
549 {
550 /* Set this function 'naked_p' and other functions can check this flag.
551 Note that in nds32 port, the 'naked_p = 1' JUST means there is no
552 callee-saved, local size, and outgoing size.
553 The varargs space and ret instruction may still present in
554 the prologue/epilogue expanding. */
555 cfun->machine->naked_p = 1;
556
557 /* No need to save $fp, $gp, and $lp.
558 We should set these value to be zero
559 so that nds32_initial_elimination_offset() can work properly. */
560 cfun->machine->fp_size = 0;
561 cfun->machine->gp_size = 0;
562 cfun->machine->lp_size = 0;
563
564 /* If stack usage computation is required,
565 we need to provide the static stack size. */
566 if (flag_stack_usage_info)
567 current_function_static_stack_size = 0;
568
569 /* No need to do following adjustment, return immediately. */
570 return;
571 }
572
573 v3pushpop_p = NDS32_V3PUSH_AVAILABLE_P;
574
575 /* Adjustment for v3push instructions:
576 If we are using v3push (push25/pop25) instructions,
577 we need to make sure Rb is $r6 and Re is
578 located on $r6, $r8, $r10, or $r14.
579 Some results above will be discarded and recomputed.
580 Note that it is only available under V3/V3M ISA and we
581 DO NOT setup following stuff for isr or variadic function. */
582 if (v3pushpop_p)
583 {
584 /* Recompute:
585 cfun->machine->fp_size
586 cfun->machine->gp_size
587 cfun->machine->lp_size
588 cfun->machine->callee_saved_first_gpr_regno
589 cfun->machine->callee_saved_last_gpr_regno */
590
591 /* For v3push instructions, $fp, $gp, and $lp are always saved. */
592 cfun->machine->fp_size = 4;
593 cfun->machine->gp_size = 4;
594 cfun->machine->lp_size = 4;
595
596 /* Remember to set Rb = $r6. */
597 cfun->machine->callee_saved_first_gpr_regno = 6;
598
599 if (cfun->machine->callee_saved_last_gpr_regno <= 6)
600 {
601 /* Re = $r6 */
602 cfun->machine->callee_saved_last_gpr_regno = 6;
603 }
604 else if (cfun->machine->callee_saved_last_gpr_regno <= 8)
605 {
606 /* Re = $r8 */
607 cfun->machine->callee_saved_last_gpr_regno = 8;
608 }
609 else if (cfun->machine->callee_saved_last_gpr_regno <= 10)
610 {
611 /* Re = $r10 */
612 cfun->machine->callee_saved_last_gpr_regno = 10;
613 }
614 else if (cfun->machine->callee_saved_last_gpr_regno <= 14)
615 {
616 /* Re = $r14 */
617 cfun->machine->callee_saved_last_gpr_regno = 14;
618 }
619 else if (cfun->machine->callee_saved_last_gpr_regno == SP_REGNUM)
620 {
621 /* If last_regno is SP_REGNUM, which means
622 it is never changed, so set it to Re = $r6. */
623 cfun->machine->callee_saved_last_gpr_regno = 6;
624 }
625 else
626 {
627 /* The program flow should not go here. */
628 gcc_unreachable ();
629 }
630 }
631
632 int sp_adjust = cfun->machine->local_size
633 + cfun->machine->out_args_size
634 + cfun->machine->callee_saved_area_gpr_padding_bytes
635 + cfun->machine->callee_saved_fpr_regs_size;
636
637 if (!v3pushpop_p
638 && sp_adjust == 0
639 && !frame_pointer_needed)
640 {
641 block_size = cfun->machine->fp_size
642 + cfun->machine->gp_size
643 + cfun->machine->lp_size;
644
645 if (cfun->machine->callee_saved_last_gpr_regno != SP_REGNUM)
646 block_size += (4 * (cfun->machine->callee_saved_last_gpr_regno
647 - cfun->machine->callee_saved_first_gpr_regno
648 + 1));
649
650 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
651 {
652 /* $r14 is last callee save register. */
653 if (cfun->machine->callee_saved_last_gpr_regno
654 < NDS32_LAST_CALLEE_SAVE_GPR_REGNUM)
655 {
656 cfun->machine->callee_saved_last_gpr_regno++;
657 }
658 else if (cfun->machine->callee_saved_first_gpr_regno == SP_REGNUM)
659 {
660 cfun->machine->callee_saved_first_gpr_regno
661 = NDS32_FIRST_CALLEE_SAVE_GPR_REGNUM;
662 cfun->machine->callee_saved_last_gpr_regno
663 = NDS32_FIRST_CALLEE_SAVE_GPR_REGNUM;
664 }
665 }
666 }
667
668 /* We have correctly set callee_saved_first_gpr_regno
669 and callee_saved_last_gpr_regno.
670 Initially, the callee_saved_gpr_regs_size is supposed to be 0.
671 As long as callee_saved_last_gpr_regno is not SP_REGNUM,
672 we can update callee_saved_gpr_regs_size with new size. */
673 if (cfun->machine->callee_saved_last_gpr_regno != SP_REGNUM)
674 {
675 /* Compute pushed size of callee-saved registers. */
676 cfun->machine->callee_saved_gpr_regs_size
677 = 4 * (cfun->machine->callee_saved_last_gpr_regno
678 - cfun->machine->callee_saved_first_gpr_regno
679 + 1);
680 }
681
682 if (TARGET_HARD_FLOAT)
683 {
684 /* Compute size of callee svaed floating-point registers. */
685 if (cfun->machine->callee_saved_last_fpr_regno != SP_REGNUM)
686 {
687 cfun->machine->callee_saved_fpr_regs_size
688 = 4 * (cfun->machine->callee_saved_last_fpr_regno
689 - cfun->machine->callee_saved_first_fpr_regno
690 + 1);
691 }
692 }
693
694 /* Important: We need to make sure that
695 (fp_size + gp_size + lp_size + callee_saved_gpr_regs_size)
696 is 8-byte alignment.
697 If it is not, calculate the padding bytes. */
698 block_size = cfun->machine->fp_size
699 + cfun->machine->gp_size
700 + cfun->machine->lp_size
701 + cfun->machine->callee_saved_gpr_regs_size;
702 if (!NDS32_DOUBLE_WORD_ALIGN_P (block_size))
703 {
704 cfun->machine->callee_saved_area_gpr_padding_bytes
705 = NDS32_ROUND_UP_DOUBLE_WORD (block_size) - block_size;
706 }
707
708 /* If stack usage computation is required,
709 we need to provide the static stack size. */
710 if (flag_stack_usage_info)
711 {
712 current_function_static_stack_size
713 = NDS32_ROUND_UP_DOUBLE_WORD (block_size)
714 + cfun->machine->local_size
715 + cfun->machine->out_args_size;
716 }
717 }
718
719 /* Function to create a parallel rtx pattern
720 which presents stack push multiple behavior.
721 The overall concept are:
722 "push registers to memory",
723 "adjust stack pointer". */
724 static void
725 nds32_emit_stack_push_multiple (unsigned Rb, unsigned Re,
726 bool save_fp_p, bool save_gp_p, bool save_lp_p,
727 bool vaarg_p)
728 {
729 unsigned regno;
730 int extra_count;
731 int num_use_regs;
732 int par_index;
733 int offset;
734
735 rtx reg;
736 rtx mem;
737 rtx push_rtx;
738 rtx adjust_sp_rtx;
739 rtx parallel_insn;
740 rtx dwarf;
741
742 /* We need to provide a customized rtx which contains
743 necessary information for data analysis,
744 so we create a parallel rtx like this:
745 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
746 (reg:SI Rb))
747 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
748 (reg:SI Rb+1))
749 ...
750 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
751 (reg:SI Re))
752 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
753 (reg:SI FP_REGNUM))
754 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
755 (reg:SI GP_REGNUM))
756 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
757 (reg:SI LP_REGNUM))
758 (set (reg:SI SP_REGNUM)
759 (plus (reg:SI SP_REGNUM) (const_int -32)))]) */
760
761 /* Calculate the number of registers that will be pushed. */
762 extra_count = 0;
763 if (save_fp_p)
764 extra_count++;
765 if (save_gp_p)
766 extra_count++;
767 if (save_lp_p)
768 extra_count++;
769 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
770 if (Rb == SP_REGNUM && Re == SP_REGNUM)
771 num_use_regs = extra_count;
772 else
773 num_use_regs = Re - Rb + 1 + extra_count;
774
775 /* In addition to used registers,
776 we need one more space for (set sp sp-x) rtx. */
777 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
778 rtvec_alloc (num_use_regs + 1));
779 par_index = 0;
780
781 /* Initialize offset and start to create push behavior. */
782 offset = -(num_use_regs * 4);
783
784 /* Create (set mem regX) from Rb, Rb+1 up to Re. */
785 for (regno = Rb; regno <= Re; regno++)
786 {
787 /* Rb and Re may be SP_REGNUM.
788 We need to break this loop immediately. */
789 if (regno == SP_REGNUM)
790 break;
791
792 reg = gen_rtx_REG (SImode, regno);
793 mem = gen_frame_mem (SImode, plus_constant (Pmode,
794 stack_pointer_rtx,
795 offset));
796 push_rtx = gen_rtx_SET (mem, reg);
797 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
798 RTX_FRAME_RELATED_P (push_rtx) = 1;
799 offset = offset + 4;
800 par_index++;
801 }
802
803 /* Create (set mem fp), (set mem gp), and (set mem lp) if necessary. */
804 if (save_fp_p)
805 {
806 reg = gen_rtx_REG (SImode, FP_REGNUM);
807 mem = gen_frame_mem (SImode, plus_constant (Pmode,
808 stack_pointer_rtx,
809 offset));
810 push_rtx = gen_rtx_SET (mem, reg);
811 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
812 RTX_FRAME_RELATED_P (push_rtx) = 1;
813 offset = offset + 4;
814 par_index++;
815 }
816 if (save_gp_p)
817 {
818 reg = gen_rtx_REG (SImode, GP_REGNUM);
819 mem = gen_frame_mem (SImode, plus_constant (Pmode,
820 stack_pointer_rtx,
821 offset));
822 push_rtx = gen_rtx_SET (mem, reg);
823 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
824 RTX_FRAME_RELATED_P (push_rtx) = 1;
825 offset = offset + 4;
826 par_index++;
827 }
828 if (save_lp_p)
829 {
830 reg = gen_rtx_REG (SImode, LP_REGNUM);
831 mem = gen_frame_mem (SImode, plus_constant (Pmode,
832 stack_pointer_rtx,
833 offset));
834 push_rtx = gen_rtx_SET (mem, reg);
835 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
836 RTX_FRAME_RELATED_P (push_rtx) = 1;
837 offset = offset + 4;
838 par_index++;
839 }
840
841 /* Create (set sp sp-x). */
842
843 /* We need to re-calculate the offset value again for adjustment. */
844 offset = -(num_use_regs * 4);
845 adjust_sp_rtx
846 = gen_rtx_SET (stack_pointer_rtx,
847 plus_constant (Pmode, stack_pointer_rtx, offset));
848 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
849 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
850
851 parallel_insn = emit_insn (parallel_insn);
852
853 /* The insn rtx 'parallel_insn' will change frame layout.
854 We need to use RTX_FRAME_RELATED_P so that GCC is able to
855 generate CFI (Call Frame Information) stuff. */
856 RTX_FRAME_RELATED_P (parallel_insn) = 1;
857
858 /* Don't use GCC's logic for CFI info if we are generate a push for VAARG
859 since we will not restore those register at epilogue. */
860 if (vaarg_p)
861 {
862 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA,
863 copy_rtx (adjust_sp_rtx), NULL_RTX);
864 REG_NOTES (parallel_insn) = dwarf;
865 }
866 }
867
868 /* Function to create a parallel rtx pattern
869 which presents stack pop multiple behavior.
870 The overall concept are:
871 "pop registers from memory",
872 "adjust stack pointer". */
873 static void
874 nds32_emit_stack_pop_multiple (unsigned Rb, unsigned Re,
875 bool save_fp_p, bool save_gp_p, bool save_lp_p)
876 {
877 unsigned regno;
878 int extra_count;
879 int num_use_regs;
880 int par_index;
881 int offset;
882
883 rtx reg;
884 rtx mem;
885 rtx pop_rtx;
886 rtx adjust_sp_rtx;
887 rtx parallel_insn;
888 rtx dwarf = NULL_RTX;
889
890 /* We need to provide a customized rtx which contains
891 necessary information for data analysis,
892 so we create a parallel rtx like this:
893 (parallel [(set (reg:SI Rb)
894 (mem (reg:SI SP_REGNUM)))
895 (set (reg:SI Rb+1)
896 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
897 ...
898 (set (reg:SI Re)
899 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
900 (set (reg:SI FP_REGNUM)
901 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
902 (set (reg:SI GP_REGNUM)
903 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
904 (set (reg:SI LP_REGNUM)
905 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
906 (set (reg:SI SP_REGNUM)
907 (plus (reg:SI SP_REGNUM) (const_int 32)))]) */
908
909 /* Calculate the number of registers that will be poped. */
910 extra_count = 0;
911 if (save_fp_p)
912 extra_count++;
913 if (save_gp_p)
914 extra_count++;
915 if (save_lp_p)
916 extra_count++;
917 /* Note that Rb and Re may be SP_REGNUM. DO NOT count it in. */
918 if (Rb == SP_REGNUM && Re == SP_REGNUM)
919 num_use_regs = extra_count;
920 else
921 num_use_regs = Re - Rb + 1 + extra_count;
922
923 /* In addition to used registers,
924 we need one more space for (set sp sp+x) rtx. */
925 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
926 rtvec_alloc (num_use_regs + 1));
927 par_index = 0;
928
929 /* Initialize offset and start to create pop behavior. */
930 offset = 0;
931
932 /* Create (set regX mem) from Rb, Rb+1 up to Re. */
933 for (regno = Rb; regno <= Re; regno++)
934 {
935 /* Rb and Re may be SP_REGNUM.
936 We need to break this loop immediately. */
937 if (regno == SP_REGNUM)
938 break;
939
940 reg = gen_rtx_REG (SImode, regno);
941 mem = gen_frame_mem (SImode, plus_constant (Pmode,
942 stack_pointer_rtx,
943 offset));
944 pop_rtx = gen_rtx_SET (reg, mem);
945 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
946 RTX_FRAME_RELATED_P (pop_rtx) = 1;
947 offset = offset + 4;
948 par_index++;
949
950 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
951 }
952
953 /* Create (set fp mem), (set gp mem), and (set lp mem) if necessary. */
954 if (save_fp_p)
955 {
956 reg = gen_rtx_REG (SImode, FP_REGNUM);
957 mem = gen_frame_mem (SImode, plus_constant (Pmode,
958 stack_pointer_rtx,
959 offset));
960 pop_rtx = gen_rtx_SET (reg, mem);
961 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
962 RTX_FRAME_RELATED_P (pop_rtx) = 1;
963 offset = offset + 4;
964 par_index++;
965
966 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
967 }
968 if (save_gp_p)
969 {
970 reg = gen_rtx_REG (SImode, GP_REGNUM);
971 mem = gen_frame_mem (SImode, plus_constant (Pmode,
972 stack_pointer_rtx,
973 offset));
974 pop_rtx = gen_rtx_SET (reg, mem);
975 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
976 RTX_FRAME_RELATED_P (pop_rtx) = 1;
977 offset = offset + 4;
978 par_index++;
979
980 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
981 }
982 if (save_lp_p)
983 {
984 reg = gen_rtx_REG (SImode, LP_REGNUM);
985 mem = gen_frame_mem (SImode, plus_constant (Pmode,
986 stack_pointer_rtx,
987 offset));
988 pop_rtx = gen_rtx_SET (reg, mem);
989 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
990 RTX_FRAME_RELATED_P (pop_rtx) = 1;
991 offset = offset + 4;
992 par_index++;
993
994 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
995 }
996
997 /* Create (set sp sp+x). */
998
999 /* The offset value is already in place. No need to re-calculate it. */
1000 adjust_sp_rtx
1001 = gen_rtx_SET (stack_pointer_rtx,
1002 plus_constant (Pmode, stack_pointer_rtx, offset));
1003 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
1004
1005 /* Tell gcc we adjust SP in this insn. */
1006 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx), dwarf);
1007
1008 parallel_insn = emit_insn (parallel_insn);
1009
1010 /* The insn rtx 'parallel_insn' will change frame layout.
1011 We need to use RTX_FRAME_RELATED_P so that GCC is able to
1012 generate CFI (Call Frame Information) stuff. */
1013 RTX_FRAME_RELATED_P (parallel_insn) = 1;
1014
1015 /* Add CFI info by manual. */
1016 REG_NOTES (parallel_insn) = dwarf;
1017 }
1018
1019 /* Function to create a parallel rtx pattern
1020 which presents stack v3push behavior.
1021 The overall concept are:
1022 "push registers to memory",
1023 "adjust stack pointer". */
1024 static void
1025 nds32_emit_stack_v3push (unsigned Rb,
1026 unsigned Re,
1027 unsigned imm8u)
1028 {
1029 unsigned regno;
1030 int num_use_regs;
1031 int par_index;
1032 int offset;
1033
1034 rtx reg;
1035 rtx mem;
1036 rtx push_rtx;
1037 rtx adjust_sp_rtx;
1038 rtx parallel_insn;
1039
1040 /* We need to provide a customized rtx which contains
1041 necessary information for data analysis,
1042 so we create a parallel rtx like this:
1043 (parallel [(set (mem (plus (reg:SI SP_REGNUM) (const_int -32)))
1044 (reg:SI Rb))
1045 (set (mem (plus (reg:SI SP_REGNUM) (const_int -28)))
1046 (reg:SI Rb+1))
1047 ...
1048 (set (mem (plus (reg:SI SP_REGNUM) (const_int -16)))
1049 (reg:SI Re))
1050 (set (mem (plus (reg:SI SP_REGNUM) (const_int -12)))
1051 (reg:SI FP_REGNUM))
1052 (set (mem (plus (reg:SI SP_REGNUM) (const_int -8)))
1053 (reg:SI GP_REGNUM))
1054 (set (mem (plus (reg:SI SP_REGNUM) (const_int -4)))
1055 (reg:SI LP_REGNUM))
1056 (set (reg:SI SP_REGNUM)
1057 (plus (reg:SI SP_REGNUM) (const_int -32-imm8u)))]) */
1058
1059 /* Calculate the number of registers that will be pushed.
1060 Since $fp, $gp, and $lp is always pushed with v3push instruction,
1061 we need to count these three registers.
1062 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
1063 So there is no need to worry about Rb=Re=SP_REGNUM case. */
1064 num_use_regs = Re - Rb + 1 + 3;
1065
1066 /* In addition to used registers,
1067 we need one more space for (set sp sp-x-imm8u) rtx. */
1068 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
1069 rtvec_alloc (num_use_regs + 1));
1070 par_index = 0;
1071
1072 /* Initialize offset and start to create push behavior. */
1073 offset = -(num_use_regs * 4);
1074
1075 /* Create (set mem regX) from Rb, Rb+1 up to Re.
1076 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
1077 So there is no need to worry about Rb=Re=SP_REGNUM case. */
1078 for (regno = Rb; regno <= Re; regno++)
1079 {
1080 reg = gen_rtx_REG (SImode, regno);
1081 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1082 stack_pointer_rtx,
1083 offset));
1084 push_rtx = gen_rtx_SET (mem, reg);
1085 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
1086 RTX_FRAME_RELATED_P (push_rtx) = 1;
1087 offset = offset + 4;
1088 par_index++;
1089 }
1090
1091 /* Create (set mem fp). */
1092 reg = gen_rtx_REG (SImode, FP_REGNUM);
1093 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1094 stack_pointer_rtx,
1095 offset));
1096 push_rtx = gen_rtx_SET (mem, reg);
1097 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
1098 RTX_FRAME_RELATED_P (push_rtx) = 1;
1099 offset = offset + 4;
1100 par_index++;
1101 /* Create (set mem gp). */
1102 reg = gen_rtx_REG (SImode, GP_REGNUM);
1103 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1104 stack_pointer_rtx,
1105 offset));
1106 push_rtx = gen_rtx_SET (mem, reg);
1107 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
1108 RTX_FRAME_RELATED_P (push_rtx) = 1;
1109 offset = offset + 4;
1110 par_index++;
1111 /* Create (set mem lp). */
1112 reg = gen_rtx_REG (SImode, LP_REGNUM);
1113 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1114 stack_pointer_rtx,
1115 offset));
1116 push_rtx = gen_rtx_SET (mem, reg);
1117 XVECEXP (parallel_insn, 0, par_index) = push_rtx;
1118 RTX_FRAME_RELATED_P (push_rtx) = 1;
1119 offset = offset + 4;
1120 par_index++;
1121
1122 /* Create (set sp sp-x-imm8u). */
1123
1124 /* We need to re-calculate the offset value again for adjustment. */
1125 offset = -(num_use_regs * 4);
1126 adjust_sp_rtx
1127 = gen_rtx_SET (stack_pointer_rtx,
1128 plus_constant (Pmode,
1129 stack_pointer_rtx,
1130 offset - imm8u));
1131 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
1132 RTX_FRAME_RELATED_P (adjust_sp_rtx) = 1;
1133
1134 parallel_insn = emit_insn (parallel_insn);
1135
1136 /* The insn rtx 'parallel_insn' will change frame layout.
1137 We need to use RTX_FRAME_RELATED_P so that GCC is able to
1138 generate CFI (Call Frame Information) stuff. */
1139 RTX_FRAME_RELATED_P (parallel_insn) = 1;
1140 }
1141
1142 /* Function to create a parallel rtx pattern
1143 which presents stack v3pop behavior.
1144 The overall concept are:
1145 "pop registers from memory",
1146 "adjust stack pointer". */
1147 static void
1148 nds32_emit_stack_v3pop (unsigned Rb,
1149 unsigned Re,
1150 unsigned imm8u)
1151 {
1152 unsigned regno;
1153 int num_use_regs;
1154 int par_index;
1155 int offset;
1156
1157 rtx reg;
1158 rtx mem;
1159 rtx pop_rtx;
1160 rtx adjust_sp_rtx;
1161 rtx parallel_insn;
1162 rtx dwarf = NULL_RTX;
1163
1164 /* We need to provide a customized rtx which contains
1165 necessary information for data analysis,
1166 so we create a parallel rtx like this:
1167 (parallel [(set (reg:SI Rb)
1168 (mem (reg:SI SP_REGNUM)))
1169 (set (reg:SI Rb+1)
1170 (mem (plus (reg:SI SP_REGNUM) (const_int 4))))
1171 ...
1172 (set (reg:SI Re)
1173 (mem (plus (reg:SI SP_REGNUM) (const_int 16))))
1174 (set (reg:SI FP_REGNUM)
1175 (mem (plus (reg:SI SP_REGNUM) (const_int 20))))
1176 (set (reg:SI GP_REGNUM)
1177 (mem (plus (reg:SI SP_REGNUM) (const_int 24))))
1178 (set (reg:SI LP_REGNUM)
1179 (mem (plus (reg:SI SP_REGNUM) (const_int 28))))
1180 (set (reg:SI SP_REGNUM)
1181 (plus (reg:SI SP_REGNUM) (const_int 32+imm8u)))]) */
1182
1183 /* Calculate the number of registers that will be poped.
1184 Since $fp, $gp, and $lp is always poped with v3pop instruction,
1185 we need to count these three registers.
1186 Under v3push, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
1187 So there is no need to worry about Rb=Re=SP_REGNUM case. */
1188 num_use_regs = Re - Rb + 1 + 3;
1189
1190 /* In addition to used registers,
1191 we need one more space for (set sp sp+x+imm8u) rtx. */
1192 parallel_insn = gen_rtx_PARALLEL (VOIDmode,
1193 rtvec_alloc (num_use_regs + 1));
1194 par_index = 0;
1195
1196 /* Initialize offset and start to create pop behavior. */
1197 offset = 0;
1198
1199 /* Create (set regX mem) from Rb, Rb+1 up to Re.
1200 Under v3pop, Rb is $r6, while Re is $r6, $r8, $r10, or $r14.
1201 So there is no need to worry about Rb=Re=SP_REGNUM case. */
1202 for (regno = Rb; regno <= Re; regno++)
1203 {
1204 reg = gen_rtx_REG (SImode, regno);
1205 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1206 stack_pointer_rtx,
1207 offset));
1208 pop_rtx = gen_rtx_SET (reg, mem);
1209 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
1210 RTX_FRAME_RELATED_P (pop_rtx) = 1;
1211 offset = offset + 4;
1212 par_index++;
1213
1214 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
1215 }
1216
1217 /* Create (set fp mem). */
1218 reg = gen_rtx_REG (SImode, FP_REGNUM);
1219 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1220 stack_pointer_rtx,
1221 offset));
1222 pop_rtx = gen_rtx_SET (reg, mem);
1223 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
1224 RTX_FRAME_RELATED_P (pop_rtx) = 1;
1225 offset = offset + 4;
1226 par_index++;
1227 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
1228
1229 /* Create (set gp mem). */
1230 reg = gen_rtx_REG (SImode, GP_REGNUM);
1231 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1232 stack_pointer_rtx,
1233 offset));
1234 pop_rtx = gen_rtx_SET (reg, mem);
1235 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
1236 RTX_FRAME_RELATED_P (pop_rtx) = 1;
1237 offset = offset + 4;
1238 par_index++;
1239 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
1240
1241 /* Create (set lp mem ). */
1242 reg = gen_rtx_REG (SImode, LP_REGNUM);
1243 mem = gen_frame_mem (SImode, plus_constant (Pmode,
1244 stack_pointer_rtx,
1245 offset));
1246 pop_rtx = gen_rtx_SET (reg, mem);
1247 XVECEXP (parallel_insn, 0, par_index) = pop_rtx;
1248 RTX_FRAME_RELATED_P (pop_rtx) = 1;
1249 offset = offset + 4;
1250 par_index++;
1251 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
1252
1253 /* Create (set sp sp+x+imm8u). */
1254
1255 /* The offset value is already in place. No need to re-calculate it. */
1256 adjust_sp_rtx
1257 = gen_rtx_SET (stack_pointer_rtx,
1258 plus_constant (Pmode,
1259 stack_pointer_rtx,
1260 offset + imm8u));
1261 XVECEXP (parallel_insn, 0, par_index) = adjust_sp_rtx;
1262
1263 if (frame_pointer_needed)
1264 {
1265 /* (expr_list:REG_CFA_DEF_CFA (plus:SI (reg/f:SI $sp)
1266 (const_int 0))
1267 mean reset frame pointer to $sp and reset to offset 0. */
1268 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
1269 const0_rtx);
1270 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
1271 }
1272 else
1273 {
1274 /* Tell gcc we adjust SP in this insn. */
1275 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA,
1276 copy_rtx (adjust_sp_rtx), dwarf);
1277 }
1278
1279 parallel_insn = emit_insn (parallel_insn);
1280
1281 /* The insn rtx 'parallel_insn' will change frame layout.
1282 We need to use RTX_FRAME_RELATED_P so that GCC is able to
1283 generate CFI (Call Frame Information) stuff. */
1284 RTX_FRAME_RELATED_P (parallel_insn) = 1;
1285
1286 /* Add CFI info by manual. */
1287 REG_NOTES (parallel_insn) = dwarf;
1288 }
1289
/* Emit the PIC prologue sequence that initializes the global pointer
   ($gp) with the address of _GLOBAL_OFFSET_TABLE_.  The sequence is
   bracketed by blockage insns so the scheduler cannot move anything
   across it.  */
static void
nds32_emit_load_gp (void)
{
  rtx got_symbol, pat;

  /* Initial GLOBAL OFFSET TABLE don't do the scheduling.  */
  emit_insn (gen_blockage ());

  got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
  /* sethi $gp, _GLOBAL_OFFSET_TABLE_ -8  */
  pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, got_symbol), UNSPEC_GOTINIT);
  pat = gen_rtx_CONST (SImode, gen_rtx_PLUS (Pmode, pat, GEN_INT (-8)));
  emit_insn (gen_sethi (pic_offset_table_rtx,pat));

  /* ori $gp, $gp, _GLOBAL_OFFSET_TABLE_ -4  */
  pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, got_symbol), UNSPEC_GOTINIT);
  pat = gen_rtx_CONST (SImode, gen_rtx_PLUS (Pmode, pat, GEN_INT (-4)));
  emit_insn (gen_lo_sum (pic_offset_table_rtx, pic_offset_table_rtx, pat));

  /* add5.pc $gp  */
  emit_insn (gen_add_pc (pic_offset_table_rtx, pic_offset_table_rtx));

  /* Initial GLOBAL OFFSET TABLE don't do the scheduling.  */
  emit_insn (gen_blockage ());
}
1315
/* Function that may creates more instructions
   for large value on adjusting stack pointer.

   In nds32 target, 'addi' can be used for stack pointer
   adjustment in prologue/epilogue stage.
   However, sometimes there are too many local variables so that
   the adjustment value is not able to be fit in the 'addi' instruction.
   One solution is to move value into a register
   and then use 'add' instruction.
   In practice, we use TA_REGNUM ($r15) to accomplish this purpose.

   TO_REG/FROM_REG are the destination/source registers of the addition;
   ADJUST_VALUE is the signed byte amount to add.  Emits nothing when
   ADJUST_VALUE is zero.  */
static void
nds32_emit_adjust_frame (rtx to_reg, rtx from_reg, int adjust_value)
{
  rtx tmp_reg;
  rtx frame_adjust_insn;
  rtx adjust_value_rtx = GEN_INT (adjust_value);

  if (adjust_value == 0)
    return;

  /* Is15 is the signed 15-bit immediate range of the 'addi' insn.  */
  if (!satisfies_constraint_Is15 (adjust_value_rtx))
    {
      /* The value is not able to fit in single addi instruction.
	 Create more instructions of moving value into a register
	 and then add stack pointer with it.  */

      /* $r15 is going to be temporary register to hold the value.  */
      tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);

      /* Create one more instruction to move value
	 into the temporary register.  */
      emit_move_insn (tmp_reg, adjust_value_rtx);

      /* Create new 'add' rtx.  */
      frame_adjust_insn = gen_addsi3 (to_reg,
				      from_reg,
				      tmp_reg);
      /* Emit rtx into insn list and receive its transformed insn rtx.  */
      frame_adjust_insn = emit_insn (frame_adjust_insn);

      /* Because (tmp_reg <- full_value) may be split into two
	 rtl patterns, we cannot set its RTX_FRAME_RELATED_P.
	 We need to construct another (sp <- sp + full_value)
	 and then insert it into sp_adjust_insn's reg note to
	 represent a frame related expression.
	 GCC knows how to refer it and output debug information.  */

      rtx plus_rtx;
      rtx set_rtx;

      plus_rtx = plus_constant (Pmode, from_reg, adjust_value);
      set_rtx = gen_rtx_SET (to_reg, plus_rtx);
      add_reg_note (frame_adjust_insn, REG_FRAME_RELATED_EXPR, set_rtx);
    }
  else
    {
      /* Generate sp adjustment instruction if and only if sp_adjust != 0.  */
      frame_adjust_insn = gen_addsi3 (to_reg,
				      from_reg,
				      adjust_value_rtx);
      /* Emit rtx into instructions list and receive INSN rtx form.  */
      frame_adjust_insn = emit_insn (frame_adjust_insn);
    }

  /* The insn rtx 'sp_adjust_insn' will change frame layout.
     We need to use RTX_FRAME_RELATED_P so that GCC is able to
     generate CFI (Call Frame Information) stuff.  */
  RTX_FRAME_RELATED_P (frame_adjust_insn) = 1;
}
1385
1386 /* Return true if MODE/TYPE need double word alignment. */
1387 static bool
1388 nds32_needs_double_word_align (machine_mode mode, const_tree type)
1389 {
1390 unsigned int align;
1391
1392 /* Pick up the alignment according to the mode or type. */
1393 align = NDS32_MODE_TYPE_ALIGN (mode, type);
1394
1395 return (align > PARM_BOUNDARY);
1396 }
1397
1398 /* Return true if FUNC is a naked function. */
1399 bool
1400 nds32_naked_function_p (tree func)
1401 {
1402 /* FOR BACKWARD COMPATIBILITY,
1403 we need to support 'no_prologue' attribute as well. */
1404 tree t_naked;
1405 tree t_no_prologue;
1406
1407 if (TREE_CODE (func) != FUNCTION_DECL)
1408 abort ();
1409
1410 /* We have to use lookup_attribute() to check attributes.
1411 Because attr_naked_p and attr_no_prologue_p are set in
1412 nds32_compute_stack_frame() and the function has not been
1413 invoked yet. */
1414 t_naked = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
1415 t_no_prologue = lookup_attribute ("no_prologue", DECL_ATTRIBUTES (func));
1416
1417 return ((t_naked != NULL_TREE) || (t_no_prologue != NULL_TREE));
1418 }
1419
1420 /* Function that determine whether a load postincrement is a good thing to use
1421 for a given mode. */
1422 bool
1423 nds32_use_load_post_increment (machine_mode mode)
1424 {
1425 return (GET_MODE_SIZE (mode) <= GET_MODE_SIZE(E_DImode));
1426 }
1427
1428 /* Function that check if 'X' is a valid address register.
1429 The variable 'STRICT' is very important to
1430 make decision for register number.
1431
1432 STRICT : true
1433 => We are in reload pass or after reload pass.
1434 The register number should be strictly limited in general registers.
1435
1436 STRICT : false
1437 => Before reload pass, we are free to use any register number. */
1438 static bool
1439 nds32_address_register_rtx_p (rtx x, bool strict)
1440 {
1441 int regno;
1442
1443 if (GET_CODE (x) != REG)
1444 return false;
1445
1446 regno = REGNO (x);
1447
1448 if (strict)
1449 return REGNO_OK_FOR_BASE_P (regno);
1450 else
1451 return true;
1452 }
1453
/* Function that check if 'INDEX' is valid to be a index rtx for address.

   OUTER_MODE : Machine mode of outer address rtx.
   INDEX      : Check if this rtx is valid to be a index for address.
   STRICT     : If it is true, we are in reload pass or after reload pass.

   Accepts a plain register, a constant offset whose range and alignment
   fit the access size of OUTER_MODE, or a scaled register of the form
   (mult reg {1,2,4,8}) / (ashift reg {0,1,2,3}).  */
static bool
nds32_legitimate_index_p (machine_mode outer_mode,
			  rtx index,
			  bool strict)
{
  int regno;
  rtx op0;
  rtx op1;

  switch (GET_CODE (index))
    {
    case REG:
      regno = REGNO (index);
      /* If we are in reload pass or after reload pass,
	 we need to limit it to general register.  */
      if (strict)
	return REGNO_OK_FOR_INDEX_P (regno);
      else
	return true;

    case CONST_INT:
      /* The alignment of the integer value is determined by 'outer_mode'.  */
      switch (GET_MODE_SIZE (outer_mode))
	{
	case 1:
	  /* Further check if the value is legal for the 'outer_mode'.  */
	  if (satisfies_constraint_Is15 (index))
	    return true;
	  break;

	case 2:
	  /* Further check if the value is legal for the 'outer_mode'.  */
	  if (satisfies_constraint_Is16 (index))
	    {
	      /* If it is not under strictly aligned situation,
		 we can return true without checking alignment.  */
	      if (!cfun->machine->strict_aligned_p)
		return true;
	      /* Make sure address is half word alignment.  */
	      else if (NDS32_HALF_WORD_ALIGN_P (INTVAL (index)))
		return true;
	    }
	  break;

	case 4:
	  /* Further check if the value is legal for the 'outer_mode'.  */
	  if (satisfies_constraint_Is17 (index))
	    {
	      if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE))
		{
		  /* With an FPU configured, the offset is further limited
		     to Is14 — presumably the FPU load/store offset field
		     is narrower; confirm against the ISA manual.  */
		  if (!satisfies_constraint_Is14 (index))
		    return false;
		}

	      /* If it is not under strictly aligned situation,
		 we can return true without checking alignment.  */
	      if (!cfun->machine->strict_aligned_p)
		return true;
	      /* Make sure address is word alignment.  */
	      else if (NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
		return true;
	    }
	  break;

	case 8:
	  /* Both words of the double-word access must be addressable,
	     hence the range check on INTVAL (index) + 4.  */
	  if (satisfies_constraint_Is17 (gen_int_mode (INTVAL (index) + 4,
						       SImode)))
	    {
	      if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE))
		{
		  if (!satisfies_constraint_Is14 (index))
		    return false;
		}

	      /* If it is not under strictly aligned situation,
		 we can return true without checking alignment.  */
	      if (!cfun->machine->strict_aligned_p)
		return true;
	      /* Make sure address is word alignment.
		 Currently we do not have 64-bit load/store yet,
		 so we will use two 32-bit load/store instructions to do
		 memory access and they are single word alignment.  */
	      else if (NDS32_SINGLE_WORD_ALIGN_P (INTVAL (index)))
		return true;
	    }
	  break;

	default:
	  return false;
	}

      return false;

    case MULT:
      op0 = XEXP (index, 0);
      op1 = XEXP (index, 1);

      if (REG_P (op0) && CONST_INT_P (op1))
	{
	  int multiplier;
	  multiplier = INTVAL (op1);

	  /* We only allow (mult reg const_int_1), (mult reg const_int_2),
	     (mult reg const_int_4) or (mult reg const_int_8).  */
	  if (multiplier != 1 && multiplier != 2
	      && multiplier != 4 && multiplier != 8)
	    return false;

	  regno = REGNO (op0);
	  /* Limit it in general registers if we are
	     in reload pass or after reload pass.  */
	  if(strict)
	    return REGNO_OK_FOR_INDEX_P (regno);
	  else
	    return true;
	}

      return false;

    case ASHIFT:
      op0 = XEXP (index, 0);
      op1 = XEXP (index, 1);

      if (REG_P (op0) && CONST_INT_P (op1))
	{
	  int sv;
	  /* op1 is already the sv value for use to do left shift.  */
	  sv = INTVAL (op1);

	  /* We only allow (ashift reg const_int_0)
	     or (ashift reg const_int_1) or (ashift reg const_int_2) or
	     (ashift reg const_int_3).  */
	  if (sv != 0 && sv != 1 && sv !=2 && sv != 3)
	    return false;

	  regno = REGNO (op0);
	  /* Limit it in general registers if we are
	     in reload pass or after reload pass.  */
	  if(strict)
	    return REGNO_OK_FOR_INDEX_P (regno);
	  else
	    return true;
	}

      return false;

    default:
      return false;
    }
}
1609
1610 static void
1611 nds32_register_pass (
1612 rtl_opt_pass *(*make_pass_func) (gcc::context *),
1613 enum pass_positioning_ops pass_pos,
1614 const char *ref_pass_name)
1615 {
1616 opt_pass *new_opt_pass = make_pass_func (g);
1617
1618 struct register_pass_info insert_pass =
1619 {
1620 new_opt_pass, /* pass */
1621 ref_pass_name, /* reference_pass_name */
1622 1, /* ref_pass_instance_number */
1623 pass_pos /* po_op */
1624 };
1625
1626 register_pass (&insert_pass);
1627 }
1628
1629 /* This function is called from nds32_option_override ().
1630 All new passes should be registered here. */
1631 static void
1632 nds32_register_passes (void)
1633 {
1634 nds32_register_pass (
1635 make_pass_nds32_fp_as_gp,
1636 PASS_POS_INSERT_BEFORE,
1637 "ira");
1638
1639 nds32_register_pass (
1640 make_pass_nds32_relax_opt,
1641 PASS_POS_INSERT_AFTER,
1642 "mach");
1643 }
1644
1645 /* ------------------------------------------------------------------------ */
1646
1647 /* PART 3: Implement target hook stuff definitions. */
1648 \f
1649
/* Computing the Length of an Insn.
   Modifies the length assigned to instruction INSN.
   LEN is the initially computed length of the insn.
   Only call insns are adjusted; everything else keeps LENGTH.  */
int
nds32_adjust_insn_length (rtx_insn *insn, int length)
{
  int adjust_value = 0;
  switch (recog_memoized (insn))
    {
    case CODE_FOR_call_internal:
    case CODE_FOR_call_value_internal:
      {
	if (NDS32_ALIGN_P ())
	  {
	    rtx_insn *next_insn = next_active_insn (insn);
	    /* Account for padding when the following active insn is
	       not a 2-byte (16-bit) instruction.  */
	    if (next_insn && get_attr_length (next_insn) != 2)
	      adjust_value += 2;
	  }
	/* We need insert a nop after a noreturn function call
	   to prevent software breakpoint corrupt the next function.  */
	if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
	  {
	    /* The nop is 2 bytes with the 16-bit ISA, 4 bytes otherwise.  */
	    if (TARGET_16_BIT)
	      adjust_value += 2;
	    else
	      adjust_value += 4;
	  }
      }
      return length + adjust_value;

    default:
      return length;
    }
}
1684
1685 /* Storage Layout. */
1686
/* Implement the expand-to-RTL hook.  This function will be called just
   before expansion into rtl.  */
static void
nds32_expand_to_rtl_hook (void)
{
  /* We need to set strictly aligned situation.
     After that, the memory address checking in nds32_legitimate_address_p()
     will take alignment offset into consideration so that it will not create
     unaligned [base + offset] access during the rtl optimization.  */
  cfun->machine->strict_aligned_p = 1;
}
1697
1698 \f
1699 /* Register Usage. */
1700
1701 static void
1702 nds32_conditional_register_usage (void)
1703 {
1704 int regno;
1705
1706 if (TARGET_LINUX_ABI)
1707 fixed_regs[TP_REGNUM] = 1;
1708
1709 if (TARGET_HARD_FLOAT)
1710 {
1711 for (regno = NDS32_FIRST_FPR_REGNUM;
1712 regno <= NDS32_LAST_FPR_REGNUM; regno++)
1713 {
1714 fixed_regs[regno] = 0;
1715 if (regno < NDS32_FIRST_FPR_REGNUM + NDS32_MAX_FPR_REGS_FOR_ARGS)
1716 call_used_regs[regno] = 1;
1717 else if (regno >= NDS32_FIRST_FPR_REGNUM + 22
1718 && regno < NDS32_FIRST_FPR_REGNUM + 48)
1719 call_used_regs[regno] = 1;
1720 else
1721 call_used_regs[regno] = 0;
1722 }
1723 }
1724 else if (TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
1725 {
1726 for (regno = NDS32_FIRST_FPR_REGNUM;
1727 regno <= NDS32_LAST_FPR_REGNUM;
1728 regno++)
1729 fixed_regs[regno] = 0;
1730 }
1731 }
1732
1733 \f
1734 /* Register Classes. */
1735
1736 static unsigned char
1737 nds32_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
1738 machine_mode mode)
1739 {
1740 /* Return the maximum number of consecutive registers
1741 needed to represent "mode" in a register of "rclass". */
1742 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
1743 }
1744
1745 static int
1746 nds32_register_priority (int hard_regno)
1747 {
1748 /* Encourage to use r0-r7 for LRA when optimize for size. */
1749 if (optimize_size)
1750 {
1751 if (hard_regno < 8)
1752 return 4;
1753 else if (hard_regno < 16)
1754 return 3;
1755 else if (hard_regno < 28)
1756 return 2;
1757 else
1758 return 1;
1759 }
1760 else
1761 {
1762 if (hard_regno > 27)
1763 return 1;
1764 else
1765 return 4;
1766 }
1767 }
1768
1769 static bool
1770 nds32_can_change_mode_class (machine_mode from,
1771 machine_mode to,
1772 reg_class_t rclass)
1773 {
1774 /* Don't spill double-precision register to two singal-precision
1775 registers */
1776 if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
1777 && GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
1778 {
1779 return !reg_classes_intersect_p (rclass, FP_REGS);
1780 }
1781
1782 return true;
1783 }
1784
1785 \f
1786 /* Stack Layout and Calling Conventions. */
1787
1788 /* There are three kinds of pointer concepts using in GCC compiler:
1789
1790 frame pointer: A pointer to the first location of local variables.
1791 stack pointer: A pointer to the top of a stack frame.
1792 argument pointer: A pointer to the incoming arguments.
1793
1794 In nds32 target calling convention, we are using 8-byte alignment.
1795 Besides, we would like to have each stack frame of a function includes:
1796
1797 [Block A]
1798 1. previous hard frame pointer
1799 2. return address
1800 3. callee-saved registers
     4. <padding bytes>  (we will calculate in nds32_compute_stack_frame()
1802 and save it at
1803 cfun->machine->callee_saved_area_padding_bytes)
1804
1805 [Block B]
1806 1. local variables
1807 2. spilling location
1808 3. <padding bytes> (it will be calculated by GCC itself)
1809 4. incoming arguments
1810 5. <padding bytes> (it will be calculated by GCC itself)
1811
1812 [Block C]
1813 1. <padding bytes> (it will be calculated by GCC itself)
1814 2. outgoing arguments
1815
1816 We 'wrap' these blocks together with
1817 hard frame pointer ($r28) and stack pointer ($r31).
1818 By applying the basic frame/stack/argument pointers concept,
   the layout of a stack frame should be like this:
1820
1821 | |
1822 old stack pointer -> ----
1823 | | \
1824 | | saved arguments for
1825 | | vararg functions
1826 | | /
1827 hard frame pointer -> --
1828 & argument pointer | | \
1829 | | previous hardware frame pointer
1830 | | return address
1831 | | callee-saved registers
1832 | | /
1833 frame pointer -> --
1834 | | \
1835 | | local variables
1836 | | and incoming arguments
1837 | | /
1838 --
1839 | | \
1840 | | outgoing
1841 | | arguments
1842 | | /
1843 stack pointer -> ----
1844
1845 $SFP and $AP are used to represent frame pointer and arguments pointer,
1846 which will be both eliminated as hard frame pointer. */
1847
1848 /* -- Eliminating Frame Pointer and Arg Pointer. */
1849
1850 static bool
1851 nds32_can_eliminate (const int from_reg, const int to_reg)
1852 {
1853 if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1854 return true;
1855
1856 if (from_reg == ARG_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1857 return true;
1858
1859 if (from_reg == FRAME_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
1860 return true;
1861
1862 if (from_reg == FRAME_POINTER_REGNUM && to_reg == HARD_FRAME_POINTER_REGNUM)
1863 return true;
1864
1865 return false;
1866 }
1867
1868 /* -- Passing Arguments in Registers. */
1869
/* Implement the function-argument hook: return the register rtx in
   which an argument of MODE/TYPE should be passed, or NULL_RTX to pass
   it on the stack.  CA records how many GPR/FPR argument registers are
   already consumed; NAMED distinguishes named from variadic args.  */
static rtx
nds32_function_arg (cumulative_args_t ca, machine_mode mode,
		    const_tree type, bool named)
{
  unsigned int regno;
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  /* The last time this hook is called,
     it is called with MODE == VOIDmode.  */
  if (mode == VOIDmode)
    return NULL_RTX;

  /* For nameless arguments, we need to take care it individually.  */
  if (!named)
    {
      /* If we are under hard float abi, we have arguments passed on the
	 stack and all situation can be handled by GCC itself.  */
      if (TARGET_HARD_FLOAT)
	return NULL_RTX;

      if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
	{
	  /* If we still have enough registers to pass argument, pick up
	     next available register number.  */
	  regno
	    = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
	  return gen_rtx_REG (mode, regno);
	}

      /* No register available, return NULL_RTX.
	 The compiler will use stack to pass argument instead.  */
      return NULL_RTX;
    }

  /* The following is to handle named argument.
     Note that the strategies of TARGET_HARD_FLOAT and !TARGET_HARD_FLOAT
     are different.  */
  if (TARGET_HARD_FLOAT)
    {
      /* For TARGET_HARD_FLOAT calling convention, we use GPR and FPR
	 to pass argument.  We have to further check TYPE and MODE so
	 that we can determine which kind of register we shall use.  */

      /* Note that we need to pass argument entirely in registers under
	 hard float abi.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && NDS32_ARG_ENTIRE_IN_FPR_REG_P (cum->fpr_offset, mode, type))
	{
	  /* Pick up the next available FPR register number.  */
	  regno
	    = NDS32_AVAILABLE_REGNUM_FOR_FPR_ARG (cum->fpr_offset, mode, type);
	  return gen_rtx_REG (mode, regno);
	}
      else if (GET_MODE_CLASS (mode) != MODE_FLOAT
	       && NDS32_ARG_ENTIRE_IN_GPR_REG_P (cum->gpr_offset, mode, type))
	{
	  /* Pick up the next available GPR register number.  */
	  regno
	    = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
	  return gen_rtx_REG (mode, regno);
	}
    }
  else
    {
      /* For !TARGET_HARD_FLOAT calling convention, we always use GPR to pass
	 argument.  Since we allow to pass argument partially in registers,
	 we can just return it if there are still registers available.  */
      if (NDS32_ARG_PARTIAL_IN_GPR_REG_P (cum->gpr_offset, mode, type))
	{
	  /* Pick up the next available register number.  */
	  regno
	    = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type);
	  return gen_rtx_REG (mode, regno);
	}

    }

  /* No register available, return NULL_RTX.
     The compiler will use stack to pass argument instead.  */
  return NULL_RTX;
}
1951
1952 static bool
1953 nds32_must_pass_in_stack (machine_mode mode, const_tree type)
1954 {
1955 /* Return true if a type must be passed in memory.
1956 If it is NOT using hard float abi, small aggregates can be
1957 passed in a register even we are calling a variadic function.
1958 So there is no need to take padding into consideration. */
1959 if (TARGET_HARD_FLOAT)
1960 return must_pass_in_stack_var_size_or_pad (mode, type);
1961 else
1962 return must_pass_in_stack_var_size (mode, type);
1963 }
1964
/* Return the number of bytes at the beginning of an argument of
   MODE/TYPE that must be put in registers; zero when the argument is
   passed entirely in registers or entirely on the stack.  */
static int
nds32_arg_partial_bytes (cumulative_args_t ca, machine_mode mode,
			 tree type, bool named ATTRIBUTE_UNUSED)
{
  /* Returns the number of bytes at the beginning of an argument that
     must be put in registers.  The value must be zero for arguments that are
     passed entirely in registers or that are entirely pushed on the stack.
     Besides, TARGET_FUNCTION_ARG for these arguments should return the
     first register to be used by the caller for this argument.  */
  unsigned int needed_reg_count;
  unsigned int remaining_reg_count;
  CUMULATIVE_ARGS *cum;

  cum = get_cumulative_args (ca);

  /* Under hard float abi, we better have argument entirely passed in
     registers or pushed on the stack so that we can reduce the complexity
     of dealing with cum->gpr_offset and cum->fpr_offset.  */
  if (TARGET_HARD_FLOAT)
    return 0;

  /* If we have already runned out of argument registers, return zero
     so that the argument will be entirely pushed on the stack.  */
  if (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
      >= NDS32_GPR_ARG_FIRST_REGNUM + NDS32_MAX_GPR_REGS_FOR_ARGS)
    return 0;

  /* Calculate how many registers do we need for this argument.  */
  needed_reg_count = NDS32_NEED_N_REGS_FOR_ARG (mode, type);

  /* Calculate how many argument registers have left for passing argument.
     Note that we should count it from next available register number.  */
  remaining_reg_count
    = NDS32_MAX_GPR_REGS_FOR_ARGS
      - (NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
	 - NDS32_GPR_ARG_FIRST_REGNUM);

  /* Note that we have to return the number of bytes, not registers count:
     a partially-in-registers argument occupies all remaining registers.  */
  if (needed_reg_count > remaining_reg_count)
    return remaining_reg_count * UNITS_PER_WORD;

  return 0;
}
2008
2009 static void
2010 nds32_function_arg_advance (cumulative_args_t ca, machine_mode mode,
2011 const_tree type, bool named)
2012 {
2013 CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
2014
2015 if (named)
2016 {
2017 /* We need to further check TYPE and MODE so that we can determine
2018 which kind of register we shall advance. */
2019
2020 /* Under hard float abi, we may advance FPR registers. */
2021 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2022 {
2023 cum->fpr_offset
2024 = NDS32_AVAILABLE_REGNUM_FOR_FPR_ARG (cum->fpr_offset, mode, type)
2025 - NDS32_FPR_ARG_FIRST_REGNUM
2026 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
2027 }
2028 else
2029 {
2030 cum->gpr_offset
2031 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
2032 - NDS32_GPR_ARG_FIRST_REGNUM
2033 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
2034 }
2035 }
2036 else
2037 {
2038 /* If this nameless argument is NOT under TARGET_HARD_FLOAT,
2039 we can advance next register as well so that caller is
2040 able to pass arguments in registers and callee must be
2041 in charge of pushing all of them into stack. */
2042 if (!TARGET_HARD_FLOAT)
2043 {
2044 cum->gpr_offset
2045 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
2046 - NDS32_GPR_ARG_FIRST_REGNUM
2047 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
2048 }
2049 }
2050 }
2051
2052 static unsigned int
2053 nds32_function_arg_boundary (machine_mode mode, const_tree type)
2054 {
2055 return (nds32_needs_double_word_align (mode, type)
2056 ? NDS32_DOUBLE_WORD_ALIGNMENT
2057 : PARM_BOUNDARY);
2058 }
2059
2060 bool
2061 nds32_vector_mode_supported_p (machine_mode mode)
2062 {
2063 if (mode == V4QImode
2064 || mode == V2HImode)
2065 return NDS32_EXT_DSP_P ();
2066
2067 return false;
2068 }
2069
2070 /* -- How Scalar Function Values Are Returned. */
2071
2072 static rtx
2073 nds32_function_value (const_tree ret_type,
2074 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
2075 bool outgoing ATTRIBUTE_UNUSED)
2076 {
2077 machine_mode mode;
2078 int unsignedp;
2079
2080 mode = TYPE_MODE (ret_type);
2081 unsignedp = TYPE_UNSIGNED (ret_type);
2082
2083 if (INTEGRAL_TYPE_P (ret_type))
2084 mode = promote_mode (ret_type, mode, &unsignedp);
2085
2086 if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2087 return gen_rtx_REG (mode, NDS32_FPR_RET_FIRST_REGNUM);
2088 else
2089 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
2090 }
2091
2092 static rtx
2093 nds32_libcall_value (machine_mode mode,
2094 const_rtx fun ATTRIBUTE_UNUSED)
2095 {
2096 if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
2097 return gen_rtx_REG (mode, NDS32_FPR_RET_FIRST_REGNUM);
2098
2099 return gen_rtx_REG (mode, NDS32_GPR_RET_FIRST_REGNUM);
2100 }
2101
2102 static bool
2103 nds32_function_value_regno_p (const unsigned int regno)
2104 {
2105 if (regno == NDS32_GPR_RET_FIRST_REGNUM
2106 || (TARGET_HARD_FLOAT
2107 && regno == NDS32_FPR_RET_FIRST_REGNUM))
2108 return true;
2109
2110 return false;
2111 }
2112
2113 /* -- How Large Values Are Returned. */
2114
2115 static bool
2116 nds32_return_in_memory (const_tree type,
2117 const_tree fntype ATTRIBUTE_UNUSED)
2118 {
2119 /* Note that int_size_in_bytes can return -1 if the size can vary
2120 or is larger than an integer. */
2121 HOST_WIDE_INT size = int_size_in_bytes (type);
2122
2123 /* For COMPLEX_TYPE, if the total size cannot be hold within two registers,
2124 the return value is supposed to be in memory. We need to be aware of
2125 that the size may be -1. */
2126 if (TREE_CODE (type) == COMPLEX_TYPE)
2127 if (size < 0 || size > 2 * UNITS_PER_WORD)
2128 return true;
2129
2130 /* If it is BLKmode and the total size cannot be hold within two registers,
2131 the return value is supposed to be in memory. We need to be aware of
2132 that the size may be -1. */
2133 if (TYPE_MODE (type) == BLKmode)
2134 if (size < 0 || size > 2 * UNITS_PER_WORD)
2135 return true;
2136
2137 /* For other cases, having result in memory is unnecessary. */
2138 return false;
2139 }
2140
2141 /* -- Function Entry and Exit. */
2142
/* The content produced from this function
   will be placed before prologue body.
   It emits human-readable comments into the assembly output describing
   the frame layout, live registers, and function attributes, and also
   triggers construction of ISR vector information as a side effect.  */
static void
nds32_asm_function_prologue (FILE *file)
{
  int r;
  const char *func_name;
  tree attrs;
  tree name;

  /* All stack frame information is supposed to be
     already computed when expanding prologue.
     The result is in cfun->machine.
     DO NOT call nds32_compute_stack_frame() here
     because it may corrupt the essential information.  */

  fprintf (file, "\t! BEGIN PROLOGUE\n");
  fprintf (file, "\t! fp needed: %d\n", frame_pointer_needed);
  fprintf (file, "\t! pretend_args: %d\n", cfun->machine->va_args_size);
  fprintf (file, "\t! local_size: %d\n", cfun->machine->local_size);
  fprintf (file, "\t! out_args_size: %d\n", cfun->machine->out_args_size);

  /* Use df_regs_ever_live_p() to detect if the register
     is ever used in the current function.
     NOTE(review): the bound 65 is hard-coded; presumably it covers the
     hard register range of interest — confirm against the target's
     register layout.  */
  fprintf (file, "\t! registers ever_live: ");
  for (r = 0; r < 65; r++)
    {
      if (df_regs_ever_live_p (r))
	fprintf (file, "%s, ", reg_names[r]);
    }
  fputc ('\n', file);

  /* Display the attributes of this function.  */
  fprintf (file, "\t! function attributes: ");
  /* Get the attributes tree list.
     Note that GCC builds attributes list with reverse order.  */
  attrs = DECL_ATTRIBUTES (current_function_decl);

  /* If there is no any attribute, print out "None".  */
  if (!attrs)
    fprintf (file, "None");

  /* If there are some attributes, try if we need to
     construct isr vector information.  */
  func_name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
  nds32_construct_isr_vectors_information (attrs, func_name);

  /* Display all attributes of this function.  */
  while (attrs)
    {
      name = TREE_PURPOSE (attrs);
      fprintf (file, "%s ", IDENTIFIER_POINTER (name));

      /* Pick up the next attribute.  */
      attrs = TREE_CHAIN (attrs);
    }
  fputc ('\n', file);
}
2201
/* Called after the RTL prologue has been expanded; emits a marker
   comment closing the prologue section in the assembly listing.  */
static void
nds32_asm_function_end_prologue (FILE *file)
{
  const char *marker = "\t! END PROLOGUE\n";

  fprintf (file, "%s", marker);
}
2208
/* Called before the RTL epilogue is expanded; emits a marker comment
   opening the epilogue section in the assembly listing.  */
static void
nds32_asm_function_begin_epilogue (FILE *file)
{
  const char *marker = "\t! BEGIN EPILOGUE\n";

  fprintf (file, "%s", marker);
}
2215
/* Emitted after the epilogue body; closes the epilogue section marker
   in the assembly listing.  */
static void
nds32_asm_function_epilogue (FILE *file)
{
  const char *marker = "\t! END EPILOGUE\n";

  fprintf (file, "%s", marker);
}
2223
/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Emit the assembly for a vcall
   thunk: adjust the 'this' pointer by DELTA, then tail-jump to FUNCTION.
   The adjustment instruction sequence is chosen by the size of DELTA
   (addi for Is15, movi+add for Is20, sethi/ori+add otherwise).  */
static void
nds32_asm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta,
			   HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			   tree function)
{
  int this_regno;

  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), file, 1);

  /* 'this' lives in $r1 when the return value is passed via a hidden
     aggregate pointer in $r0, otherwise in $r0.  */
  this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		? 1
		: 0);

  if (flag_pic)
    {
      /* PIC: save $gp on the stack and materialize the GOT base so the
	 PLT call below can be resolved.  */
      fprintf (file, "\tsmw.adm\t$r31, [$r31], $r31, 4\n");
      fprintf (file, "\tsethi\t%s, hi20(_GLOBAL_OFFSET_TABLE_-8)\n",
	       reg_names [PIC_OFFSET_TABLE_REGNUM]);
      fprintf (file, "\tori\t%s, %s, lo12(_GLOBAL_OFFSET_TABLE_-4)\n",
	       reg_names [PIC_OFFSET_TABLE_REGNUM],
	       reg_names [PIC_OFFSET_TABLE_REGNUM]);

      /* V3 ISA has a single pc-relative add; older ISAs read $pc through
	 $ta explicitly.  */
      if (TARGET_ISA_V3)
	fprintf (file, "\tadd5.pc\t$gp\n");
      else
	{
	  fprintf (file, "\tmfusr\t$ta, $pc\n");
	  fprintf (file, "\tadd\t%s, $ta, %s\n",
		   reg_names [PIC_OFFSET_TABLE_REGNUM],
		   reg_names [PIC_OFFSET_TABLE_REGNUM]);
	}
    }

  if (delta != 0)
    {
      /* Adjust 'this' by DELTA, using the shortest sequence that can
	 encode the immediate.  */
      if (satisfies_constraint_Is15 (GEN_INT (delta)))
	{
	  fprintf (file, "\taddi\t$r%d, $r%d, " HOST_WIDE_INT_PRINT_DEC "\n",
		   this_regno, this_regno, delta);
	}
      else if (satisfies_constraint_Is20 (GEN_INT (delta)))
	{
	  fprintf (file, "\tmovi\t$ta, " HOST_WIDE_INT_PRINT_DEC "\n", delta);
	  fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
	}
      else
	{
	  /* Full 32-bit immediate: build it in $ta with sethi/ori.  */
	  fprintf (file,
		   "\tsethi\t$ta, hi20(" HOST_WIDE_INT_PRINT_DEC ")\n",
		   delta);
	  fprintf (file,
		   "\tori\t$ta, $ta, lo12(" HOST_WIDE_INT_PRINT_DEC ")\n",
		   delta);
	  fprintf (file, "\tadd\t$r%d, $r%d, $ta\n", this_regno, this_regno);
	}
    }

  if (flag_pic)
    {
      /* PIC tail call: load the target through the PLT, restore $gp from
	 the stack, then jump through $ta.  */
      fprintf (file, "\tla\t$ta, ");
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      fprintf (file, "@PLT\n");
      fprintf (file, "\t! epilogue\n");
      fprintf (file, "\tlwi.bi\t%s, [%s], 4\n",
	       reg_names[PIC_OFFSET_TABLE_REGNUM],
	       reg_names[STACK_POINTER_REGNUM]);
      fprintf (file, "\tbr\t$ta\n");
    }
  else
    {
      /* Non-PIC: direct branch to the target function.  */
      fprintf (file, "\tb\t");
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      fprintf (file, "\n");
    }

  final_end_function ();
}
2303
2304 /* -- Permitting tail calls. */
2305
2306 /* Return true if it is ok to do sibling call optimization. */
2307 static bool
2308 nds32_function_ok_for_sibcall (tree decl,
2309 tree exp ATTRIBUTE_UNUSED)
2310 {
2311 /* The DECL is NULL if it is an indirect call. */
2312
2313 /* 1. Do not apply sibling call if -mv3push is enabled,
2314 because pop25 instruction also represents return behavior.
2315 2. If this function is a isr function, do not apply sibling call
2316 because it may perform the behavior that user does not expect.
2317 3. If this function is a variadic function, do not apply sibling call
2318 because the stack layout may be a mess.
2319 4. We don't want to apply sibling call optimization for indirect
2320 sibcall because the pop behavior in epilogue may pollute the
2321 content of caller-saved regsiter when the register is used for
2322 indirect sibcall.
2323 5. In pic mode, it may use some registers for PLT call. */
2324 return (!TARGET_V3PUSH
2325 && !nds32_isr_function_p (current_function_decl)
2326 && (cfun->machine->va_args_size == 0)
2327 && decl
2328 && !flag_pic);
2329 }
2330
2331 /* Determine whether we need to enable warning for function return check. */
2332 static bool
2333 nds32_warn_func_return (tree decl)
2334 {
2335 /* Naked functions are implemented entirely in assembly, including the
2336 return sequence, so suppress warnings about this. */
2337 return !nds32_naked_function_p (decl);
2338 }
2339
2340 \f
2341 /* Implementing the Varargs Macros. */
2342
2343 static void
2344 nds32_setup_incoming_varargs (cumulative_args_t ca,
2345 machine_mode mode,
2346 tree type,
2347 int *pretend_args_size,
2348 int second_time ATTRIBUTE_UNUSED)
2349 {
2350 unsigned int total_args_regs;
2351 unsigned int num_of_used_regs;
2352 unsigned int remaining_reg_count;
2353 CUMULATIVE_ARGS *cum;
2354
2355 /* If we are under hard float abi, we do not need to set *pretend_args_size.
2356 So that all nameless arguments are pushed by caller and all situation
2357 can be handled by GCC itself. */
2358 if (TARGET_HARD_FLOAT)
2359 return;
2360
2361 /* We are using NDS32_MAX_GPR_REGS_FOR_ARGS registers,
2362 counting from NDS32_GPR_ARG_FIRST_REGNUM, for saving incoming arguments.
2363 However, for nameless(anonymous) arguments, we should push them on the
2364 stack so that all the nameless arguments appear to have been passed
2365 consecutively in the memory for accessing. Hence, we need to check and
2366 exclude the registers that are used for named arguments. */
2367
2368 cum = get_cumulative_args (ca);
2369
2370 /* The MODE and TYPE describe the last argument.
2371 We need those information to determine the remaining registers
2372 for varargs. */
2373 total_args_regs
2374 = NDS32_MAX_GPR_REGS_FOR_ARGS + NDS32_GPR_ARG_FIRST_REGNUM;
2375 num_of_used_regs
2376 = NDS32_AVAILABLE_REGNUM_FOR_GPR_ARG (cum->gpr_offset, mode, type)
2377 + NDS32_NEED_N_REGS_FOR_ARG (mode, type);
2378
2379 remaining_reg_count = total_args_regs - num_of_used_regs;
2380 *pretend_args_size = remaining_reg_count * UNITS_PER_WORD;
2381
2382 return;
2383 }
2384
2385 static bool
2386 nds32_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
2387 {
2388 /* If this hook returns true, the named argument of FUNCTION_ARG is always
2389 true for named arguments, and false for unnamed arguments. */
2390 return true;
2391 }
2392
2393 \f
2394 /* Trampolines for Nested Functions. */
2395
/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE.  Emit the fixed trampoline
   code template that nds32_trampoline_init later copies onto the stack
   and patches: it loads the nested function address and static chain
   from the two data slots appended after the code, then jumps.  */
static void
nds32_asm_trampoline_template (FILE *f)
{
  if (TARGET_REDUCED_REGS)
    {
      /* Trampoline is not supported on reduced-set registers yet.  */
      sorry ("a nested function is not supported for reduced registers");
    }
  else
    {
      asm_fprintf (f, "\t! Trampoline code template\n");
      asm_fprintf (f, "\t! This code fragment will be copied "
		      "into stack on demand\n");

      asm_fprintf (f, "\tmfusr\t$r16,$pc\n");
      asm_fprintf (f, "\tlwi\t$r15,[$r16 + 20] "
		      "! load nested function address\n");
      asm_fprintf (f, "\tlwi\t$r16,[$r16 + 16] "
		      "! load chain_value\n");
      asm_fprintf (f, "\tjr\t$r15\n");
    }

  /* Preserve space ($pc + 16) for saving chain_value,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving chain_value\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);

  /* Preserve space ($pc + 20) for saving nested function address,
     nds32_trampoline_init will fill the value in this slot.  */
  asm_fprintf (f, "\t! space for saving nested function address\n");
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
}
2428
/* Emit RTL insns to initialize the variable parts of a trampoline.
   Step 1 copies the code template onto the stack and patches in
   CHAIN_VALUE (offset 16) and the nested function address (offset 20).
   Step 2 syncs the instruction cache over the trampoline bytes, since
   the code will be executed from a data (stack) region.  */
static void
nds32_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  int i;

  /* Nested function address.  */
  rtx fnaddr;
  /* The memory rtx that is going to
     be filled with chain_value.  */
  rtx chain_value_mem;
  /* The memory rtx that is going to
     be filled with nested function address.  */
  rtx nested_func_mem;

  /* Start address of trampoline code in stack, for doing cache sync.  */
  rtx sync_cache_addr;
  /* Temporary register for sync instruction.  */
  rtx tmp_reg;
  /* Instruction-cache sync instruction,
     requesting an argument as starting address.  */
  rtx isync_insn;
  /* For convenience reason of doing comparison.  */
  int tramp_align_in_bytes;

  /* Trampoline is not supported on reduced-set registers yet.  */
  if (TARGET_REDUCED_REGS)
    sorry ("a nested function is not supported for reduced registers");

  /* STEP 1: Copy trampoline code template into stack,
	     fill up essential data into stack.  */

  /* Extract nested function address rtx.  */
  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* m_tramp is memory rtx that is going to be filled with trampoline code.
     We have nds32_asm_trampoline_template() to emit template pattern.  */
  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* After copying trampoline code into stack,
     fill chain_value into stack.  */
  chain_value_mem = adjust_address (m_tramp, SImode, 16);
  emit_move_insn (chain_value_mem, chain_value);
  /* After copying trampoline code into stack,
     fill nested function address into stack.  */
  nested_func_mem = adjust_address (m_tramp, SImode, 20);
  emit_move_insn (nested_func_mem, fnaddr);

  /* STEP 2: Sync instruction-cache.  */

  /* We have successfully filled trampoline code into stack.
     However, in order to execute code in stack correctly,
     we must sync instruction cache.  */
  sync_cache_addr = XEXP (m_tramp, 0);
  tmp_reg         = gen_reg_rtx (SImode);
  isync_insn      = gen_unspec_volatile_isync (tmp_reg);

  /* Because nds32_cache_block_size is in bytes,
     we get trampoline alignment in bytes for convenient comparison.  */
  tramp_align_in_bytes = TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT;

  if (tramp_align_in_bytes >= nds32_cache_block_size
      && (tramp_align_in_bytes % nds32_cache_block_size) == 0)
    {
      /* Under this condition, the starting address of trampoline
	 must be aligned to the starting address of each cache block
	 and we do not have to worry about cross-boundary issue.
	 Issue one isync per cache block covering the trampoline.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}
    }
  else if (TRAMPOLINE_SIZE > nds32_cache_block_size)
    {
      /* The starting address of trampoline code
	 may not be aligned to the cache block,
	 so the trampoline code may be across two cache block.
	 We need to sync the last element, which is 4-byte size,
	 of trampoline template.  */
      for (i = 0;
	   i < (TRAMPOLINE_SIZE + nds32_cache_block_size - 1)
	       / nds32_cache_block_size;
	   i++)
	{
	  emit_move_insn (tmp_reg,
			  plus_constant (Pmode, sync_cache_addr,
					 nds32_cache_block_size * i));
	  emit_insn (isync_insn);
	}

      /* The last element of trampoline template is 4-byte size.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }
  else
    {
      /* This is the simplest case.
	 Because TRAMPOLINE_SIZE is less than or
	 equal to nds32_cache_block_size,
	 we can just sync start address and
	 the last element of trampoline code.  */

      /* Sync starting address of trampoline code.  */
      emit_move_insn (tmp_reg, sync_cache_addr);
      emit_insn (isync_insn);
      /* Sync the last element, which is 4-byte size,
	 of trampoline template.  */
      emit_move_insn (tmp_reg,
		      plus_constant (Pmode, sync_cache_addr,
				     TRAMPOLINE_SIZE - 4));
      emit_insn (isync_insn);
    }

  /* Set instruction serialization barrier
     to guarantee the correct operations.  */
  emit_insn (gen_unspec_volatile_isb ());
}
2555
2556 \f
2557 /* Addressing Modes. */
2558
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  Decide whether X is a valid
   memory address for an access of MODE.  STRICT is true when hard
   register numbers must be validated (during/after reload).  */
static bool
nds32_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if (TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
    {
      /* When using floating-point instructions,
	 we don't allow 'addr' to be [symbol_ref], [CONST] pattern.  */
      if ((mode == DFmode || mode == SFmode)
	  && (GET_CODE (x) == SYMBOL_REF
	      || GET_CODE(x) == CONST))
	return false;

      /* Allow [post_modify] addressing mode, when using FPU instructions.  */
      if (GET_CODE (x) == POST_MODIFY
	  && mode == DFmode)
	{
	  if (GET_CODE (XEXP (x, 0)) == REG
	      && GET_CODE (XEXP (x, 1)) == PLUS)
	    {
	      rtx plus_op = XEXP (x, 1);
	      rtx op0 = XEXP (plus_op, 0);
	      rtx op1 = XEXP (plus_op, 1);

	      if (nds32_address_register_rtx_p (op0, strict)
		  && CONST_INT_P (op1))
		{
		  if (satisfies_constraint_Is14 (op1))
		    {
		      /* If it is not under strictly aligned situation,
			 we can return true without checking alignment.  */
		      if (!cfun->machine->strict_aligned_p)
			return true;
		      /* Make sure address is word alignment.
			 Currently we do not have 64-bit load/store yet,
			 so we will use two 32-bit load/store instructions to do
			 memory access and they are single word alignment.  */
		      else if (NDS32_SINGLE_WORD_ALIGN_P (INTVAL (op1)))
			return true;
		    }
		}
	    }
	}
    }

  /* For (mem:DI addr) or (mem:DF addr) case,
     we only allow 'addr' to be [reg], [symbol_ref],
     [const], or [reg + const_int] pattern.  */
  if (mode == DImode || mode == DFmode)
    {
      /* Allow [Reg + const_int] addressing mode.  */
      if (GET_CODE (x) == PLUS)
	{
	  if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	      && nds32_legitimate_index_p (mode, XEXP (x, 1), strict)
	      && CONST_INT_P (XEXP (x, 1)))
	    return true;
	  else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
		   && nds32_legitimate_index_p (mode, XEXP (x, 0), strict)
		   && CONST_INT_P (XEXP (x, 0)))
	    return true;
	}

      /* Allow [post_inc] and [post_dec] addressing mode.  */
      if (GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
	{
	  if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
	    return true;
	}

      /* Now check [reg], [symbol_ref], and [const].  */
      if (GET_CODE (x) != REG
	  && GET_CODE (x) != SYMBOL_REF
	  && GET_CODE (x) != CONST)
	return false;
    }

  /* Check if 'x' is a valid address.  */
  switch (GET_CODE (x))
    {
    case REG:
      /* (mem (reg A)) => [Ra] */
      return nds32_address_register_rtx_p (x, strict);

    case SYMBOL_REF:
      /* (mem (symbol_ref A)) => [symbol_ref] */

      /* PIC and TLS symbols must be accessed indirectly, never as a
	 bare address.  */
      if (flag_pic || SYMBOL_REF_TLS_MODEL (x))
	return false;

      if (TARGET_ICT_MODEL_LARGE && nds32_indirect_call_referenced_p (x))
	return false;

      /* If -mcmodel=large, the 'symbol_ref' is not a valid address
	 during or after LRA/reload phase.  */
      if (TARGET_CMODEL_LARGE
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;
      /* If -mcmodel=medium and the symbol references to rodata section,
	 the 'symbol_ref' is not a valid address during or after
	 LRA/reload phase.  */
      if (TARGET_CMODEL_MEDIUM
	  && (NDS32_SYMBOL_REF_RODATA_P (x)
	      || CONSTANT_POOL_ADDRESS_P (x))
	  && (reload_completed
	      || reload_in_progress
	      || lra_in_progress))
	return false;

      return true;

    case CONST:
      /* (mem (const (...)))
	 => [ + const_addr ], where const_addr = symbol_ref + const_int */
      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 0);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
	    {
	      /* Now we see the [ + const_addr ] pattern, but we need
		 some further checking.  */

	      if (flag_pic || SYMBOL_REF_TLS_MODEL (op0))
		return false;

	      /* If -mcmodel=large, the 'const_addr' is not a valid address
		 during or after LRA/reload phase.  */
	      if (TARGET_CMODEL_LARGE
		  && (reload_completed
		      || reload_in_progress
		      || lra_in_progress))
		return false;
	      /* If -mcmodel=medium and the symbol references to rodata section,
		 the 'const_addr' is not a valid address during or after
		 LRA/reload phase.  */
	      if (TARGET_CMODEL_MEDIUM
		  && NDS32_SYMBOL_REF_RODATA_P (op0)
		  && (reload_completed
		      || reload_in_progress
		      || lra_in_progress))
		return false;

	      /* At this point we can make sure 'const_addr' is a
		 valid address.  */
	      return true;
	    }
	}

      return false;

    case POST_MODIFY:
      /* (mem (post_modify (reg) (plus (reg) (reg))))
	 => [Ra], Rb */
      /* (mem (post_modify (reg) (plus (reg) (const_int))))
	 => [Ra], const_int */
      if (GET_CODE (XEXP (x, 0)) == REG
	  && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  rtx plus_op = XEXP (x, 1);

	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (nds32_address_register_rtx_p (op0, strict)
	      && nds32_legitimate_index_p (mode, op1, strict))
	    return true;
	  else
	    return false;
	}

      return false;

    case POST_INC:
    case POST_DEC:
      /* (mem (post_inc reg)) => [Ra], 1/2/4 */
      /* (mem (post_dec reg)) => [Ra], -1/-2/-4 */
      /* The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
	 We only need to deal with register Ra.  */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict))
	return true;
      else
	return false;

    case PLUS:
      /* (mem (plus reg const_int))
	 => [Ra + imm] */
      /* (mem (plus reg reg))
	 => [Ra + Rb] */
      /* (mem (plus (mult reg const_int) reg))
	 => [Ra + Rb << sv] */
      if (nds32_address_register_rtx_p (XEXP (x, 0), strict)
	  && nds32_legitimate_index_p (mode, XEXP (x, 1), strict))
	return true;
      else if (nds32_address_register_rtx_p (XEXP (x, 1), strict)
	       && nds32_legitimate_index_p (mode, XEXP (x, 0), strict))
	return true;
      else
	return false;

    case LO_SUM:
      /* (mem (lo_sum (reg) (symbol_ref))) */
      /* (mem (lo_sum (reg) (const (plus (symbol_ref) (reg)))) */
      /* TLS case: (mem (lo_sum (reg) (const (unspec symbol_ref X)))) */
      /* The LO_SUM is a valid address if and only if we would like to
	 generate 32-bit full address memory access with any of following
	 circumstance:
	 1. -mcmodel=large.
	 2. -mcmodel=medium and the symbol_ref references to rodata.  */
      {
	rtx sym = NULL_RTX;

	if (flag_pic)
	  return false;

	if (!REG_P (XEXP (x, 0)))
	  return false;

	/* Dig the symbol_ref out of the high-part operand, which may be
	   wrapped in CONST (PLUS ...) or CONST (UNSPEC ...) for TLS.  */
	if (GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
	  sym = XEXP (x, 1);
	else if (GET_CODE (XEXP (x, 1)) == CONST)
	  {
	    rtx plus = XEXP(XEXP (x, 1), 0);
	    if (GET_CODE (plus) == PLUS)
	      sym = XEXP (plus, 0);
	    else if (GET_CODE (plus) == UNSPEC)
	      sym = XVECEXP (plus, 0, 0);
	  }
	else
	  return false;

	gcc_assert (GET_CODE (sym) == SYMBOL_REF);

	if (TARGET_ICT_MODEL_LARGE
	    && nds32_indirect_call_referenced_p (sym))
	  return true;

	if (TARGET_CMODEL_LARGE)
	  return true;
	else if (TARGET_CMODEL_MEDIUM
		 && NDS32_SYMBOL_REF_RODATA_P (sym))
	  return true;
	else
	  return false;
      }

    default:
      return false;
    }
}
2813
2814 static rtx
2815 nds32_legitimize_address (rtx x,
2816 rtx oldx ATTRIBUTE_UNUSED,
2817 machine_mode mode ATTRIBUTE_UNUSED)
2818 {
2819 if (nds32_tls_referenced_p (x))
2820 x = nds32_legitimize_tls_address (x);
2821 else if (flag_pic && SYMBOLIC_CONST_P (x))
2822 x = nds32_legitimize_pic_address (x);
2823 else if (TARGET_ICT_MODEL_LARGE && nds32_indirect_call_referenced_p (x))
2824 x = nds32_legitimize_ict_address (x);
2825
2826 return x;
2827 }
2828
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Return true if X may appear
   directly as a constant operand of MODE.  */
static bool
nds32_legitimate_constant_p (machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_DOUBLE:
      /* With real FPU instructions, FP constants are loaded from the
	 constant pool instead of being immediate operands.  */
      if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
	  && (mode == DFmode || mode == SFmode))
	return false;
      break;
    case CONST:
      /* Strip the CONST wrapper and an optional symbol+offset PLUS to
	 inspect the underlying expression.  */
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (!CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    /* GOT/PLT/TLS/ICT relocations require code to resolve and
	       are never legitimate bare constants.  */
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	    case UNSPEC_TLSGD:
	    case UNSPEC_TLSLD:
	    case UNSPEC_TLSIE:
	    case UNSPEC_TLSLE:
	    case UNSPEC_ICT:
	      return false;
	    default:
	      return true;
	    }
	}
      break;
    case SYMBOL_REF:
      /* TLS symbols need a call to resolve in
	 precompute_register_parameters.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return false;
      break;
    default:
      return true;
    }

  /* Reaching here (via a break above) means no rejection applied.  */
  return true;
}
2879
/* Reorganize the UNSPEC CONST and return its direct symbol: unwrap
   GOT/PLT/TLS/ICT relocation wrappers so debug output sees the plain
   SYMBOL_REF.  Implements TARGET_DELEGITIMIZE_ADDRESS.  */
static rtx
nds32_delegitimize_address (rtx x)
{
  x = delegitimize_mem_from_attrs (x);

  if (GET_CODE(x) == CONST)
    {
      rtx inner = XEXP (x, 0);

      /* Handle for GOTOFF: the UNSPEC may sit inside a (plus ... offset).  */
      if (GET_CODE (inner) == PLUS)
	inner = XEXP (inner, 0);

      if (GET_CODE (inner) == UNSPEC)
	{
	  switch (XINT (inner, 1))
	    {
	    /* For any known relocation wrapper, the wrapped symbol is the
	       first vector element.  */
	    case UNSPEC_GOTINIT:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	    case UNSPEC_TLSGD:
	    case UNSPEC_TLSLD:
	    case UNSPEC_TLSIE:
	    case UNSPEC_TLSLE:
	    case UNSPEC_ICT:
	      x = XVECEXP (inner, 0, 0);
	      break;
	    default:
	      break;
	    }
	}
    }
  return x;
}
2916
2917 static machine_mode
2918 nds32_vectorize_preferred_simd_mode (scalar_mode mode)
2919 {
2920 if (!NDS32_EXT_DSP_P ())
2921 return word_mode;
2922
2923 switch (mode)
2924 {
2925 case E_QImode:
2926 return V4QImode;
2927 case E_HImode:
2928 return V2HImode;
2929 default:
2930 return word_mode;
2931 }
2932 }
2933
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  Return true if X must not be
   placed into the constant pool.  */
static bool
nds32_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      /* Anything that is not a legitimate constant (GOT/PLT/TLS/ICT
	 wrappers) cannot go into the pool either.  */
      return !nds32_legitimate_constant_p (mode, x);
    case SYMBOL_REF:
      /* All symbols have to be accessed through gp-relative in PIC mode.  */
      /* We don't want to force symbol as constant pool in .text section,
	 because we use the gp-relatived instruction to load in small
	 or medium model.  */
      if (flag_pic
	  || SYMBOL_REF_TLS_MODEL (x)
	  || TARGET_CMODEL_SMALL
	  || TARGET_CMODEL_MEDIUM)
	return true;
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      /* Late in compilation under PIC, keep numeric constants out of
	 memory as well.  */
      if (flag_pic && (lra_in_progress || reload_completed))
	return true;
      break;
    default:
      return false;
    }
  return false;
}
2962
2963 \f
2964 /* Condition Code Status. */
2965
2966 /* -- Representation of condition codes using registers. */
2967
2968 static void
2969 nds32_canonicalize_comparison (int *code,
2970 rtx *op0 ATTRIBUTE_UNUSED,
2971 rtx *op1,
2972 bool op0_preserve_value ATTRIBUTE_UNUSED)
2973 {
2974 /* When the instruction combination pass tries to combine a comparison insn
2975 with its previous insns, it also transforms the operator in order to
2976 minimize its constant field. For example, it tries to transform a
2977 comparison insn from
2978 (set (reg:SI 54)
2979 (ltu:SI (reg:SI 52)
2980 (const_int 10 [0xa])))
2981 to
2982 (set (reg:SI 54)
2983 (leu:SI (reg:SI 52)
2984 (const_int 9 [0x9])))
2985
2986 However, the nds32 target only provides instructions supporting the LTU
2987 operation directly, and the implementation of the pattern "cbranchsi4"
2988 only expands the LTU form. In order to handle the non-LTU operations
2989 generated from passes other than the RTL expansion pass, we have to
2990 implement this hook to revert those changes. Since we only expand the LTU
2991 operator in the RTL expansion pass, we might only need to handle the LEU
2992 case, unless we find other optimization passes perform more aggressive
2993 transformations. */
2994
2995 if (*code == LEU && CONST_INT_P (*op1))
2996 {
2997 *op1 = gen_int_mode (INTVAL (*op1) + 1, SImode);
2998 *code = LTU;
2999 }
3000 }
3001
3002 \f
3003 /* Describing Relative Costs of Operations. */
3004
3005 static int
3006 nds32_register_move_cost (machine_mode mode,
3007 reg_class_t from,
3008 reg_class_t to)
3009 {
3010 /* In garywolf cpu, FPR to GPR is chaper than other cpu. */
3011 if (TARGET_PIPELINE_GRAYWOLF)
3012 {
3013 if (GET_MODE_SIZE (mode) == 8)
3014 {
3015 /* DPR to GPR. */
3016 if (from == FP_REGS && to != FP_REGS)
3017 return 3;
3018 /* GPR to DPR. */
3019 if (from != FP_REGS && to == FP_REGS)
3020 return 2;
3021 }
3022 else
3023 {
3024 if ((from == FP_REGS && to != FP_REGS)
3025 || (from != FP_REGS && to == FP_REGS))
3026 return 2;
3027 }
3028 }
3029
3030 if ((from == FP_REGS && to != FP_REGS)
3031 || (from != FP_REGS && to == FP_REGS))
3032 return 3;
3033 else if (from == HIGH_REGS || to == HIGH_REGS)
3034 return optimize_size ? 6 : 2;
3035 else
3036 return 2;
3037 }
3038
3039 static int
3040 nds32_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3041 reg_class_t rclass ATTRIBUTE_UNUSED,
3042 bool in ATTRIBUTE_UNUSED)
3043 {
3044 return 8;
3045 }
3046
/* This target hook describes the relative costs of RTL expressions.
   Return 'true' when all subexpressions of x have been processed.
   Return 'false' to sum the costs of sub-rtx, plus cost of this operation.
   Refer to gcc/rtlanal.c for more information.
   The actual cost model is implemented by nds32_rtx_costs_impl, which is
   defined elsewhere; this hook only forwards its arguments.  */
static bool
nds32_rtx_costs (rtx x,
		 machine_mode mode,
		 int outer_code,
		 int opno,
		 int *total,
		 bool speed)
{
  return nds32_rtx_costs_impl (x, mode, outer_code, opno, total, speed);
}
3061
/* Implement TARGET_ADDRESS_COST.  The actual model is in
   nds32_address_cost_impl, defined elsewhere; this hook only forwards
   its arguments.  */
static int
nds32_address_cost (rtx address,
		    machine_mode mode,
		    addr_space_t as,
		    bool speed)
{
  return nds32_address_cost_impl (address, mode, as, speed);
}
3070
3071 \f
3072 /* Dividing the Output into Sections (Texts, Data, . . . ). */
3073
/* If references to a symbol or a constant must be treated differently
   depending on something about the variable or function named by the symbol
   (such as what section it is in), we use this hook to store flags
   in symbol_ref rtx.  */
static void
nds32_encode_section_info (tree decl, rtx rtl, int new_decl_p)
{
  default_encode_section_info (decl, rtl, new_decl_p);

  /* For the memory rtx, if it references to rodata section, we can store
     NDS32_SYMBOL_FLAG_RODATA flag into symbol_ref rtx so that the
     nds32_legitimate_address_p() can determine how to treat such symbol_ref
     based on -mcmodel=X and this information.  */
  if (MEM_P (rtl) && MEM_READONLY_P (rtl))
    {
      rtx addr = XEXP (rtl, 0);

      if (GET_CODE (addr) == SYMBOL_REF)
	{
	  /* For (mem (symbol_ref X)) case.  */
	  SYMBOL_REF_FLAGS (addr) |= NDS32_SYMBOL_FLAG_RODATA;
	}
      else if (GET_CODE (addr) == CONST
	       && GET_CODE (XEXP (addr, 0)) == PLUS)
	{
	  /* For (mem (const (plus (symbol_ref X) (const_int N)))) case.  */
	  rtx plus_op = XEXP (addr, 0);
	  rtx op0 = XEXP (plus_op, 0);
	  rtx op1 = XEXP (plus_op, 1);

	  if (GET_CODE (op0) == SYMBOL_REF && CONST_INT_P (op1))
	    SYMBOL_REF_FLAGS (op0) |= NDS32_SYMBOL_FLAG_RODATA;
	}
    }
}
3109
3110 \f
3111 /* Defining the Output Assembler Language. */
3112
3113 /* -- The Overall Framework of an Assembler File. */
3114
/* Implement TARGET_ASM_FILE_START.  Emit the assembly prologue for every
   output file: ABI directives, the ICT model, the ISR vector size, a weak
   _FP_BASE_ reference for fp-as-gp optimization, and a human-readable
   summary of the active target options as assembler comments (lines
   beginning with '!').  */
static void
nds32_asm_file_start (void)
{
  default_file_start ();

  if (flag_pic)
    fprintf (asm_out_file, "\t.pic\n");

  /* Tell assembler which ABI we are using.  */
  fprintf (asm_out_file, "\t! ABI version\n");
  if (TARGET_HARD_FLOAT)
    fprintf (asm_out_file, "\t.abi_2fp_plus\n");
  else
    fprintf (asm_out_file, "\t.abi_2\n");

  /* Tell assembler that this asm code is generated by compiler.  */
  fprintf (asm_out_file, "\t! This asm file is generated by compiler\n");
  fprintf (asm_out_file, "\t.flag\tverbatim\n");

  /* Insert directive for linker to distinguish object's ict flag.  */
  if (!TARGET_LINUX_ABI)
    {
      if (TARGET_ICT_MODEL_LARGE)
	fprintf (asm_out_file, "\t.ict_model\tlarge\n");
      else
	fprintf (asm_out_file, "\t.ict_model\tsmall\n");
    }

  /* We need to provide the size of each vector for interrupt handler
     under elf toolchain.  */
  if (!TARGET_LINUX_ABI)
    {
      fprintf (asm_out_file, "\t! This vector size directive is required "
	       "for checking inconsistency on interrupt handler\n");
      fprintf (asm_out_file, "\t.vec_size\t%d\n", nds32_isr_vector_size);
    }

  /* If user enables '-mforce-fp-as-gp' or compiles programs with -Os,
     the compiler may produce 'la $fp,_FP_BASE_' instruction
     at prologue for fp-as-gp optimization.
     We should emit weak reference of _FP_BASE_ to avoid undefined reference
     in case user does not pass '--relax' option to linker.  */
  if (!TARGET_LINUX_ABI && (TARGET_FORCE_FP_AS_GP || optimize_size))
    {
      fprintf (asm_out_file, "\t! This weak reference is required to do "
	       "fp-as-gp link time optimization\n");
      fprintf (asm_out_file, "\t.weak\t_FP_BASE_\n");
    }

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  if (TARGET_ISA_V2)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V2");
  if (TARGET_ISA_V3)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3");
  if (TARGET_ISA_V3M)
    fprintf (asm_out_file, "\t! ISA family\t\t: %s\n", "V3M");

  switch (nds32_cpu_option)
    {
    case CPU_N6:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N6");
      break;

    case CPU_N7:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N7");
      break;

    case CPU_N8:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N8");
      break;

    case CPU_E8:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "E8");
      break;

    case CPU_N9:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N9");
      break;

    case CPU_N10:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N10");
      break;

    case CPU_GRAYWOLF:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "Graywolf");
      break;

    /* N12 and N13 share the same pipeline model; both report "N13".  */
    case CPU_N12:
    case CPU_N13:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "N13");
      break;

    case CPU_SIMPLE:
      fprintf (asm_out_file, "\t! Pipeline model\t: %s\n", "SIMPLE");
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_CMODEL_SMALL)
    fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "SMALL");
  if (TARGET_CMODEL_MEDIUM)
    fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "MEDIUM");
  if (TARGET_CMODEL_LARGE)
    fprintf (asm_out_file, "\t! Code model\t\t: %s\n", "LARGE");

  fprintf (asm_out_file, "\t! Endian setting\t: %s\n",
	   ((TARGET_BIG_ENDIAN) ? "big-endian"
				: "little-endian"));
  fprintf (asm_out_file, "\t! Use SP floating-point instruction\t: %s\n",
	   ((TARGET_FPU_SINGLE) ? "Yes"
				: "No"));
  fprintf (asm_out_file, "\t! Use DP floating-point instruction\t: %s\n",
	   ((TARGET_FPU_DOUBLE) ? "Yes"
				: "No"));
  fprintf (asm_out_file, "\t! ABI version\t\t: %s\n",
	   ((TARGET_HARD_FLOAT) ? "ABI2FP+"
				: "ABI2"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! Use conditional move\t\t: %s\n",
	   ((TARGET_CMOV) ? "Yes"
			  : "No"));
  fprintf (asm_out_file, "\t! Use performance extension\t: %s\n",
	   ((TARGET_EXT_PERF) ? "Yes"
			      : "No"));
  fprintf (asm_out_file, "\t! Use performance extension 2\t: %s\n",
	   ((TARGET_EXT_PERF2) ? "Yes"
			       : "No"));
  fprintf (asm_out_file, "\t! Use string extension\t\t: %s\n",
	   ((TARGET_EXT_STRING) ? "Yes"
				: "No"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! V3PUSH instructions\t: %s\n",
	   ((TARGET_V3PUSH) ? "Yes"
			    : "No"));
  fprintf (asm_out_file, "\t! 16-bit instructions\t: %s\n",
	   ((TARGET_16_BIT) ? "Yes"
			    : "No"));
  fprintf (asm_out_file, "\t! Reduced registers set\t: %s\n",
	   ((TARGET_REDUCED_REGS) ? "Yes"
				  : "No"));

  fprintf (asm_out_file, "\t! Support unaligned access\t\t: %s\n",
	   (flag_unaligned_access ? "Yes"
				  : "No"));

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  if (optimize_size)
    fprintf (asm_out_file, "\t! Optimization level\t: -Os\n");
  else if (optimize_fast)
    fprintf (asm_out_file, "\t! Optimization level\t: -Ofast\n");
  else if (optimize_debug)
    fprintf (asm_out_file, "\t! Optimization level\t: -Og\n");
  else
    fprintf (asm_out_file, "\t! Optimization level\t: -O%d\n", optimize);

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  fprintf (asm_out_file, "\t! Cache block size\t: %d\n",
	   nds32_cache_block_size);

  fprintf (asm_out_file, "\t! ------------------------------------\n");

  /* Let the ISR machinery emit its own file-start directives.  */
  nds32_asm_file_start_for_isr ();
}
3287
/* Implement TARGET_ASM_FILE_END.  Emit trailing directives at the end of
   the assembly output: ISR epilogue data, an optional .note.GNU-stack
   section, and a closing comment separator.  */
static void
nds32_asm_file_end (void)
{
  nds32_asm_file_end_for_isr ();

  /* The NDS32 Linux stack is mapped non-executable by default, so add a
     .note.GNU-stack section.  */
  if (TARGET_LINUX_ABI)
    file_end_indicate_exec_stack ();

  fprintf (asm_out_file, "\t! ------------------------------------\n");
}
3300
3301 static bool
3302 nds32_asm_output_addr_const_extra (FILE *file, rtx x)
3303 {
3304 if (GET_CODE (x) == UNSPEC)
3305 {
3306 switch (XINT (x, 1))
3307 {
3308 case UNSPEC_GOTINIT:
3309 output_addr_const (file, XVECEXP (x, 0, 0));
3310 break;
3311 case UNSPEC_GOTOFF:
3312 output_addr_const (file, XVECEXP (x, 0, 0));
3313 fputs ("@GOTOFF", file);
3314 break;
3315 case UNSPEC_GOT:
3316 output_addr_const (file, XVECEXP (x, 0, 0));
3317 fputs ("@GOT", file);
3318 break;
3319 case UNSPEC_PLT:
3320 output_addr_const (file, XVECEXP (x, 0, 0));
3321 fputs ("@PLT", file);
3322 break;
3323 case UNSPEC_TLSGD:
3324 output_addr_const (file, XVECEXP (x, 0, 0));
3325 fputs ("@TLSDESC", file);
3326 break;
3327 case UNSPEC_TLSLD:
3328 output_addr_const (file, XVECEXP (x, 0, 0));
3329 fputs ("@TLSDESC", file);
3330 break;
3331 case UNSPEC_TLSIE:
3332 output_addr_const (file, XVECEXP (x, 0, 0));
3333 fputs ("@GOTTPOFF", file);
3334 break;
3335 case UNSPEC_TLSLE:
3336 output_addr_const (file, XVECEXP (x, 0, 0));
3337 fputs ("@TPOFF", file);
3338 break;
3339 case UNSPEC_ICT:
3340 output_addr_const (file, XVECEXP (x, 0, 0));
3341 fputs ("@ICT", file);
3342 break;
3343 default:
3344 return false;
3345 }
3346 return true;
3347 }
3348 else
3349 return false;
3350 }
3351
3352 /* -- Output and Generation of Labels. */
3353
3354 static void
3355 nds32_asm_globalize_label (FILE *stream, const char *name)
3356 {
3357 fputs ("\t.global\t", stream);
3358 assemble_name (stream, name);
3359 fputs ("\n", stream);
3360 }
3361
3362 /* -- Output of Assembler Instructions. */
3363
3364 static void
3365 nds32_print_operand (FILE *stream, rtx x, int code)
3366 {
3367 HOST_WIDE_INT op_value = 0;
3368 HOST_WIDE_INT one_position;
3369 HOST_WIDE_INT zero_position;
3370 bool pick_lsb_p = false;
3371 bool pick_msb_p = false;
3372 int regno;
3373
3374 if (CONST_INT_P (x))
3375 op_value = INTVAL (x);
3376
3377 switch (code)
3378 {
3379 case 0 :
3380 /* Do nothing special. */
3381 break;
3382
3383 case 'b':
3384 /* Use exact_log2() to search the 0-bit position. */
3385 gcc_assert (CONST_INT_P (x));
3386 zero_position = exact_log2 (~UINTVAL (x) & GET_MODE_MASK (SImode));
3387 gcc_assert (zero_position != -1);
3388 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, zero_position);
3389
3390 /* No need to handle following process, so return immediately. */
3391 return;
3392
3393 case 'e':
3394 gcc_assert (MEM_P (x)
3395 && GET_CODE (XEXP (x, 0)) == PLUS
3396 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
3397 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (XEXP (XEXP (x, 0), 1)));
3398
3399 /* No need to handle following process, so return immediately. */
3400 return;
3401
3402 case 'v':
3403 gcc_assert (CONST_INT_P (x)
3404 && (INTVAL (x) == 0
3405 || INTVAL (x) == 8
3406 || INTVAL (x) == 16
3407 || INTVAL (x) == 24));
3408 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
3409
3410 /* No need to handle following process, so return immediately. */
3411 return;
3412
3413 case 'B':
3414 /* Use exact_log2() to search the 1-bit position. */
3415 gcc_assert (CONST_INT_P (x));
3416 one_position = exact_log2 (UINTVAL (x) & GET_MODE_MASK (SImode));
3417 gcc_assert (one_position != -1);
3418 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, one_position);
3419
3420 /* No need to handle following process, so return immediately. */
3421 return;
3422
3423 case 'L':
3424 /* X is supposed to be REG rtx. */
3425 gcc_assert (REG_P (x));
3426 /* Claim that we are going to pick LSB part of X. */
3427 pick_lsb_p = true;
3428 break;
3429
3430 case 'H':
3431 /* X is supposed to be REG rtx. */
3432 gcc_assert (REG_P (x));
3433 /* Claim that we are going to pick MSB part of X. */
3434 pick_msb_p = true;
3435 break;
3436
3437 case 'V':
3438 /* 'x' is supposed to be CONST_INT, get the value. */
3439 gcc_assert (CONST_INT_P (x));
3440
3441 /* According to the Andes architecture,
3442 the system/user register index range is 0 ~ 1023.
3443 In order to avoid conflict between user-specified-integer value
3444 and enum-specified-register value,
3445 the 'enum nds32_intrinsic_registers' value
3446 in nds32_intrinsic.h starts from 1024. */
3447 if (op_value < 1024 && op_value >= 0)
3448 {
3449 /* If user gives integer value directly (0~1023),
3450 we just print out the value. */
3451 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, op_value);
3452 }
3453 else if (op_value < 0
3454 || op_value >= ((int) ARRAY_SIZE (nds32_intrinsic_register_names)
3455 + 1024))
3456 {
3457 /* The enum index value for array size is out of range. */
3458 error ("intrinsic register index is out of range");
3459 }
3460 else
3461 {
3462 /* If user applies normal way with __NDS32_REG_XXX__ enum data,
3463 we can print out register name. Remember to substract 1024. */
3464 fprintf (stream, "%s",
3465 nds32_intrinsic_register_names[op_value - 1024]);
3466 }
3467
3468 /* No need to handle following process, so return immediately. */
3469 return;
3470
3471 case 'R': /* cctl valck */
3472 /* Note the cctl divide to 5 group and share the same name table. */
3473 if (op_value < 0 || op_value > 4)
3474 error ("CCTL intrinsic function subtype out of range!");
3475 fprintf (stream, "%s", nds32_cctl_names[op_value]);
3476 return;
3477
3478 case 'T': /* cctl idxwbinv */
3479 /* Note the cctl divide to 5 group and share the same name table. */
3480 if (op_value < 0 || op_value > 4)
3481 error ("CCTL intrinsic function subtype out of range!");
3482 fprintf (stream, "%s", nds32_cctl_names[op_value + 4]);
3483 return;
3484
3485 case 'U': /* cctl vawbinv */
3486 /* Note the cctl divide to 5 group and share the same name table. */
3487 if (op_value < 0 || op_value > 4)
3488 error ("CCTL intrinsic function subtype out of range!");
3489 fprintf (stream, "%s", nds32_cctl_names[op_value + 8]);
3490 return;
3491
3492 case 'X': /* cctl idxread */
3493 /* Note the cctl divide to 5 group and share the same name table. */
3494 if (op_value < 0 || op_value > 4)
3495 error ("CCTL intrinsic function subtype out of range!");
3496 fprintf (stream, "%s", nds32_cctl_names[op_value + 12]);
3497 return;
3498
3499 case 'W': /* cctl idxwitre */
3500 /* Note the cctl divide to 5 group and share the same name table. */
3501 if (op_value < 0 || op_value > 4)
3502 error ("CCTL intrinsic function subtype out of range!");
3503 fprintf (stream, "%s", nds32_cctl_names[op_value + 16]);
3504 return;
3505
3506 case 'Z': /* dpref */
3507 fprintf (stream, "%s", nds32_dpref_names[op_value]);
3508 return;
3509
3510 default :
3511 /* Unknown flag. */
3512 output_operand_lossage ("invalid operand output code");
3513 break;
3514 }
3515
3516 switch (GET_CODE (x))
3517 {
3518 case LABEL_REF:
3519 output_addr_const (stream, x);
3520 break;
3521
3522 case SYMBOL_REF:
3523 output_addr_const (stream, x);
3524
3525 if (!TARGET_LINUX_ABI && nds32_indirect_call_referenced_p (x))
3526 fprintf (stream, "@ICT");
3527
3528 break;
3529
3530 case REG:
3531 /* Print a Double-precision register name. */
3532 if ((GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
3533 && NDS32_IS_FPR_REGNUM (REGNO (x)))
3534 {
3535 regno = REGNO (x);
3536 if (!NDS32_FPR_REGNO_OK_FOR_DOUBLE (regno))
3537 {
3538 output_operand_lossage ("invalid operand for code '%c'", code);
3539 break;
3540 }
3541 fprintf (stream, "$fd%d", (regno - NDS32_FIRST_FPR_REGNUM) >> 1);
3542 break;
3543 }
3544
3545 /* Print LSB or MSB part of register pair if the
3546 constraint modifier 'L' or 'H' is specified. */
3547 if ((GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
3548 && NDS32_IS_GPR_REGNUM (REGNO (x)))
3549 {
3550 if ((pick_lsb_p && WORDS_BIG_ENDIAN)
3551 || (pick_msb_p && !WORDS_BIG_ENDIAN))
3552 {
3553 /* If we would like to print out LSB register under big-endian,
3554 or print out MSB register under little-endian, we need to
3555 increase register number. */
3556 regno = REGNO (x);
3557 regno++;
3558 fputs (reg_names[regno], stream);
3559 break;
3560 }
3561 }
3562
3563 /* Forbid using static chain register ($r16)
3564 on reduced-set registers configuration. */
3565 if (TARGET_REDUCED_REGS
3566 && REGNO (x) == STATIC_CHAIN_REGNUM)
3567 sorry ("a nested function is not supported for reduced registers");
3568
3569 /* Normal cases, print out register name. */
3570 fputs (reg_names[REGNO (x)], stream);
3571 break;
3572
3573 case MEM:
3574 output_address (GET_MODE (x), XEXP (x, 0));
3575 break;
3576
3577 case HIGH:
3578 if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE)
3579 {
3580 const REAL_VALUE_TYPE *rv;
3581 long val;
3582 gcc_assert (GET_MODE (x) == SFmode);
3583
3584 rv = CONST_DOUBLE_REAL_VALUE (XEXP (x, 0));
3585 REAL_VALUE_TO_TARGET_SINGLE (*rv, val);
3586
3587 fprintf (stream, "hi20(0x%lx)", val);
3588 }
3589 else
3590 gcc_unreachable ();
3591 break;
3592
3593 case CONST_DOUBLE:
3594 const REAL_VALUE_TYPE *rv;
3595 long val;
3596 gcc_assert (GET_MODE (x) == SFmode);
3597
3598 rv = CONST_DOUBLE_REAL_VALUE (x);
3599 REAL_VALUE_TO_TARGET_SINGLE (*rv, val);
3600
3601 fprintf (stream, "0x%lx", val);
3602 break;
3603
3604 case CODE_LABEL:
3605 case CONST_INT:
3606 case CONST:
3607 output_addr_const (stream, x);
3608 break;
3609
3610 case CONST_VECTOR:
3611 fprintf (stream, HOST_WIDE_INT_PRINT_HEX, const_vector_to_hwint (x));
3612 break;
3613
3614 case LO_SUM:
3615 /* This is a special case for inline assembly using memory address 'p'.
3616 The inline assembly code is expected to use pesudo instruction
3617 for the operand. EX: la */
3618 output_addr_const (stream, XEXP(x, 1));
3619 break;
3620
3621 default:
3622 /* Generally, output_addr_const () is able to handle most cases.
3623 We want to see what CODE could appear,
3624 so we use gcc_unreachable() to stop it. */
3625 debug_rtx (x);
3626 gcc_unreachable ();
3627 break;
3628 }
3629 }
3630
/* Implement TARGET_PRINT_OPERAND_ADDRESS.  Output the nds32 assembly
   syntax for memory address X to STREAM.  Handles the addressing forms
   the port generates: symbolic, [Ra], [Ra + imm], [Ra + Rb], scaled
   index ([Ra + Rb << sv]), post-modify, and post-inc/dec.  Any other
   form is a compiler bug (gcc_unreachable).  */
static void
nds32_print_operand_address (FILE *stream,
			     machine_mode mode ATTRIBUTE_UNUSED,
			     rtx x)
{
  rtx op0, op1;

  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case CONST:
      /* [ + symbol_ref] */
      /* [ + const_addr], where const_addr = symbol_ref + const_int */
      fputs ("[ + ", stream);
      output_addr_const (stream, x);
      fputs ("]", stream);
      break;

    case LO_SUM:
      /* This is a special case for inline assembly using memory operand 'm'.
	 The inline assembly code is expected to use pseudo instruction
	 for the operand.  EX: [ls].[bhw]  */
      fputs ("[ + ", stream);
      op1 = XEXP (x, 1);
      output_addr_const (stream, op1);
      fputs ("]", stream);
      break;

    case REG:
      /* Forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REGNO (x) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      /* [Ra] */
      fprintf (stream, "[%s]", reg_names[REGNO (x)]);
      break;

    case PLUS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op1)
	  && REGNO (op1) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && CONST_INT_P (op1))
	{
	  /* [Ra + imm] */
	  fprintf (stream, "[%s + (" HOST_WIDE_INT_PRINT_DEC ")]",
		   reg_names[REGNO (op0)], INTVAL (op1));
	}
      else if (REG_P (op0) && REG_P (op1))
	{
	  /* [Ra + Rb] */
	  fprintf (stream, "[%s + %s]",
		   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
	}
      else if (GET_CODE (op0) == MULT && REG_P (op1))
	{
	  /* [Ra + Rb << sv]
	     From observation, the pattern looks like:
	     (plus:SI (mult:SI (reg:SI 58)
			       (const_int 4 [0x4]))
		      (reg/f:SI 57)) */
	  int sv;

	  /* We need to set sv to output shift value.
	     The multiplier is the scale factor: a multiply by 1/2/4/8
	     corresponds to a left shift of 0/1/2/3.  */
	  if (INTVAL (XEXP (op0, 1)) == 1)
	    sv = 0;
	  else if (INTVAL (XEXP (op0, 1)) == 2)
	    sv = 1;
	  else if (INTVAL (XEXP (op0, 1)) == 4)
	    sv = 2;
	  else if (INTVAL (XEXP (op0, 1)) == 8)
	    sv = 3;
	  else
	    gcc_unreachable ();

	  fprintf (stream, "[%s + %s << %d]",
		   reg_names[REGNO (op1)],
		   reg_names[REGNO (XEXP (op0, 0))],
		   sv);
	}
      else if (GET_CODE (op0) == ASHIFT && REG_P (op1))
	{
	  /* [Ra + Rb << sv]
	     In normal, ASHIFT can be converted to MULT like above case.
	     But when the address rtx does not go through canonicalize_address
	     defined in fwprop, we'll need this case.  */
	  int sv = INTVAL (XEXP (op0, 1));
	  gcc_assert (sv <= 3 && sv >=0);

	  fprintf (stream, "[%s + %s << %d]",
		   reg_names[REGNO (op1)],
		   reg_names[REGNO (XEXP (op0, 0))],
		   sv);
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    case POST_MODIFY:
      /* (post_modify (regA) (plus (regA) (regB)))
	 (post_modify (regA) (plus (regA) (const_int)))
	 We would like to extract
	 regA and regB (or const_int) from plus rtx.  */
      op0 = XEXP (XEXP (x, 1), 0);
      op1 = XEXP (XEXP (x, 1), 1);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");
      /* Checking op1, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op1)
	  && REGNO (op1) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0) && REG_P (op1))
	{
	  /* [Ra], Rb */
	  fprintf (stream, "[%s], %s",
		   reg_names[REGNO (op0)], reg_names[REGNO (op1)]);
	}
      else if (REG_P (op0) && CONST_INT_P (op1))
	{
	  /* [Ra], imm */
	  fprintf (stream, "[%s], " HOST_WIDE_INT_PRINT_DEC,
		   reg_names[REGNO (op0)], INTVAL (op1));
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    case POST_INC:
    case POST_DEC:
      op0 = XEXP (x, 0);

      /* Checking op0, forbid using static chain register ($r16)
	 on reduced-set registers configuration.  */
      if (TARGET_REDUCED_REGS
	  && REG_P (op0)
	  && REGNO (op0) == STATIC_CHAIN_REGNUM)
	sorry ("a nested function is not supported for reduced registers");

      if (REG_P (op0))
	{
	  /* "[Ra], 1/2/4" or "[Ra], -1/-2/-4"
	     The 1/2/4 or -1/-2/-4 have been displayed in nds32.md.
	     We only need to deal with register Ra.  */
	  fprintf (stream, "[%s]", reg_names[REGNO (op0)]);
	}
      else
	{
	  /* The control flow is not supposed to be here.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}

      break;

    default :
      /* Generally, output_addr_const () is able to handle most cases.
	 We want to see what CODE could appear,
	 so we use gcc_unreachable() to stop it.  */
      debug_rtx (x);
      gcc_unreachable ();
      break;
    }
}
3826
3827 /* -- Assembler Commands for Exception Regions. */
3828
3829 static rtx
3830 nds32_dwarf_register_span (rtx reg)
3831 {
3832 rtx dwarf_high, dwarf_low;
3833 rtx dwarf_single;
3834 machine_mode mode;
3835 int regno;
3836
3837 mode = GET_MODE (reg);
3838 regno = REGNO (reg);
3839
3840 /* We need to adjust dwarf register information for floating-point registers
3841 rather than using default register number mapping. */
3842 if (regno >= NDS32_FIRST_FPR_REGNUM
3843 && regno <= NDS32_LAST_FPR_REGNUM)
3844 {
3845 if (mode == DFmode || mode == SCmode)
3846 {
3847 /* By default, GCC maps increasing register numbers to increasing
3848 memory locations, but paired FPRs in NDS32 target are always
3849 big-endian, i.e.:
3850
3851 fd0 : fs0 fs1
3852 (MSB) (LSB)
3853
3854 We must return parallel rtx to represent such layout. */
3855 dwarf_high = gen_rtx_REG (word_mode, regno);
3856 dwarf_low = gen_rtx_REG (word_mode, regno + 1);
3857 return gen_rtx_PARALLEL (VOIDmode,
3858 gen_rtvec (2, dwarf_low, dwarf_high));
3859 }
3860 else if (mode == DCmode)
3861 {
3862 rtx dwarf_high_re = gen_rtx_REG (word_mode, regno);
3863 rtx dwarf_low_re = gen_rtx_REG (word_mode, regno + 1);
3864 rtx dwarf_high_im = gen_rtx_REG (word_mode, regno);
3865 rtx dwarf_low_im = gen_rtx_REG (word_mode, regno + 1);
3866 return gen_rtx_PARALLEL (VOIDmode,
3867 gen_rtvec (4, dwarf_low_re, dwarf_high_re,
3868 dwarf_high_im, dwarf_low_im));
3869 }
3870 else if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3871 {
3872 return NULL_RTX;
3873 }
3874 else
3875 {
3876 /* We should not be here. */
3877 gcc_unreachable ();
3878 }
3879 }
3880
3881 return NULL_RTX;
3882 }
3883
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
nds32_dbx_register_number (unsigned int regno)
{
  /* The nds32 port in GDB maintains a mapping between dwarf register
     number and displayed register name.  For backward compatibility to
     previous toolchain, currently our gdb still has four registers
     (d0.l, d0.h, d1.l, and d1.h) between GPR and FPR while compiler
     does not count those four registers in its register number table.
     So we have to add 4 on its register number and then create new
     dwarf information.  Hopefully we can discard such workaround
     in the future.  */
  return NDS32_IS_FPR_REGNUM (regno) ? regno + 4 : regno;
}
3902
3903 \f
3904 /* Defining target-specific uses of __attribute__. */
3905
3906 /* Add some checking after merging attributes. */
3907 static tree
3908 nds32_merge_decl_attributes (tree olddecl, tree newdecl)
3909 {
3910 tree combined_attrs;
3911
3912 /* Create combined attributes. */
3913 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
3914 DECL_ATTRIBUTES (newdecl));
3915
3916 /* Since newdecl is acutally a duplicate of olddecl,
3917 we can take olddecl for some operations. */
3918 if (TREE_CODE (olddecl) == FUNCTION_DECL)
3919 {
3920 /* Check isr-specific attributes conflict. */
3921 nds32_check_isr_attrs_conflict (olddecl, combined_attrs);
3922 }
3923
3924 return combined_attrs;
3925 }
3926
3927 /* Add some checking when inserting attributes. */
3928 static void
3929 nds32_insert_attributes (tree decl, tree *attributes)
3930 {
3931 /* A "indirect_call" function attribute implies "noinline" and "noclone"
3932 for elf toolchain to support ROM patch mechanism. */
3933 if (TREE_CODE (decl) == FUNCTION_DECL
3934 && lookup_attribute ("indirect_call", *attributes) != NULL)
3935 {
3936 tree new_attrs = *attributes;
3937
3938 if (TARGET_LINUX_ABI)
3939 error("cannot use indirect_call attribute under linux toolchain");
3940
3941 if (lookup_attribute ("noinline", new_attrs) == NULL)
3942 new_attrs = tree_cons (get_identifier ("noinline"), NULL, new_attrs);
3943 if (lookup_attribute ("noclone", new_attrs) == NULL)
3944 new_attrs = tree_cons (get_identifier ("noclone"), NULL, new_attrs);
3945
3946 if (!TREE_PUBLIC (decl))
3947 error("indirect_call attribute can't apply for static function");
3948
3949 *attributes = new_attrs;
3950 }
3951
3952 /* For function declaration, we need to check isr-specific attributes:
3953 1. Call nds32_check_isr_attrs_conflict() to check any conflict.
3954 2. Check valid integer value for interrupt/exception.
3955 3. Check valid integer value for reset.
3956 4. Check valid function for nmi/warm. */
3957 if (TREE_CODE (decl) == FUNCTION_DECL)
3958 {
3959 tree func_attrs;
3960 tree intr, excp, reset;
3961
3962 /* Pick up function attributes. */
3963 func_attrs = *attributes;
3964
3965 /* 1. Call nds32_check_isr_attrs_conflict() to check any conflict. */
3966 nds32_check_isr_attrs_conflict (decl, func_attrs);
3967
3968 /* Now we are starting to check valid id value
3969 for interrupt/exception/reset.
3970 Note that we ONLY check its validity here.
3971 To construct isr vector information, it is still performed
3972 by nds32_construct_isr_vectors_information(). */
3973 intr = lookup_attribute ("interrupt", func_attrs);
3974 excp = lookup_attribute ("exception", func_attrs);
3975 reset = lookup_attribute ("reset", func_attrs);
3976
3977 /* The following code may use attribute arguments. If there is no
3978 argument from source code, it will cause segmentation fault.
3979 Therefore, return dircetly and report error message later. */
3980 if ((intr && TREE_VALUE (intr) == NULL)
3981 || (excp && TREE_VALUE (excp) == NULL)
3982 || (reset && TREE_VALUE (reset) == NULL))
3983 return;
3984
3985 /* ------------------------------------------------------------- */
3986 /* FIXME:
3987 FOR BACKWARD COMPATIBILITY, we need to support following patterns:
3988
3989 __attribute__((interrupt("XXX;YYY;id=ZZZ")))
3990 __attribute__((exception("XXX;YYY;id=ZZZ")))
3991 __attribute__((reset("vectors=XXX;nmi_func=YYY;warm_func=ZZZ")))
3992
3993 If interrupt/exception/reset appears and its argument is a
3994 STRING_CST, we will use other functions to parse string in the
3995 nds32_construct_isr_vectors_information() and then set necessary
3996 isr information in the nds32_isr_vectors[] array. Here we can
3997 just return immediately to avoid new-syntax checking. */
3998 if (intr != NULL_TREE
3999 && TREE_CODE (TREE_VALUE (TREE_VALUE (intr))) == STRING_CST)
4000 return;
4001 if (excp != NULL_TREE
4002 && TREE_CODE (TREE_VALUE (TREE_VALUE (excp))) == STRING_CST)
4003 return;
4004 if (reset != NULL_TREE
4005 && TREE_CODE (TREE_VALUE (TREE_VALUE (reset))) == STRING_CST)
4006 return;
4007 /* ------------------------------------------------------------- */
4008
4009 if (intr || excp)
4010 {
4011 /* Deal with interrupt/exception. */
4012 tree id_list;
4013 unsigned int lower_bound, upper_bound;
4014
4015 /* The way to handle interrupt or exception is the same,
4016 we just need to take care of actual vector number.
4017 For interrupt(0..63), the actual vector number is (9..72).
4018 For exception(1..8), the actual vector number is (1..8). */
4019 lower_bound = (intr) ? (0) : (1);
4020 upper_bound = (intr) ? (63) : (8);
4021
4022 /* Prepare id list so that we can traverse id value. */
4023 id_list = (intr) ? (TREE_VALUE (intr)) : (TREE_VALUE (excp));
4024
4025 /* 2. Check valid integer value for interrupt/exception. */
4026 while (id_list)
4027 {
4028 tree id;
4029
4030 /* Pick up each vector id value. */
4031 id = TREE_VALUE (id_list);
4032 /* Issue error if it is not a valid integer value. */
4033 if (TREE_CODE (id) != INTEGER_CST
4034 || wi::ltu_p (wi::to_wide (id), lower_bound)
4035 || wi::gtu_p (wi::to_wide (id), upper_bound))
4036 error ("invalid id value for interrupt/exception attribute");
4037
4038 /* Advance to next id. */
4039 id_list = TREE_CHAIN (id_list);
4040 }
4041 }
4042 else if (reset)
4043 {
4044 /* Deal with reset. */
4045 tree id_list;
4046 tree id;
4047 tree nmi, warm;
4048 unsigned int lower_bound;
4049 unsigned int upper_bound;
4050
4051 /* Prepare id_list and identify id value so that
4052 we can check if total number of vectors is valid. */
4053 id_list = TREE_VALUE (reset);
4054 id = TREE_VALUE (id_list);
4055
4056 /* The maximum numbers for user's interrupt is 64. */
4057 lower_bound = 0;
4058 upper_bound = 64;
4059
4060 /* 3. Check valid integer value for reset. */
4061 if (TREE_CODE (id) != INTEGER_CST
4062 || wi::ltu_p (wi::to_wide (id), lower_bound)
4063 || wi::gtu_p (wi::to_wide (id), upper_bound))
4064 error ("invalid id value for reset attribute");
4065
4066 /* 4. Check valid function for nmi/warm. */
4067 nmi = lookup_attribute ("nmi", func_attrs);
4068 warm = lookup_attribute ("warm", func_attrs);
4069
4070 if (nmi != NULL_TREE)
4071 {
4072 tree nmi_func_list;
4073 tree nmi_func;
4074
4075 nmi_func_list = TREE_VALUE (nmi);
4076 nmi_func = TREE_VALUE (nmi_func_list);
4077
4078 /* Issue error if it is not a valid nmi function. */
4079 if (TREE_CODE (nmi_func) != IDENTIFIER_NODE)
4080 error ("invalid nmi function for reset attribute");
4081 }
4082
4083 if (warm != NULL_TREE)
4084 {
4085 tree warm_func_list;
4086 tree warm_func;
4087
4088 warm_func_list = TREE_VALUE (warm);
4089 warm_func = TREE_VALUE (warm_func_list);
4090
4091 /* Issue error if it is not a valid warm function. */
4092 if (TREE_CODE (warm_func) != IDENTIFIER_NODE)
4093 error ("invalid warm function for reset attribute");
4094 }
4095 }
4096 else
4097 {
4098 /* No interrupt, exception, or reset attribute is set. */
4099 return;
4100 }
4101 }
4102 }
4103
/* Implement TARGET_OPTION_PRAGMA_PARSE.  Return true if the pragma was
   handled by the target; this port handles none.  */
static bool
nds32_option_pragma_parse (tree args ATTRIBUTE_UNUSED,
			   tree pop_target ATTRIBUTE_UNUSED)
{
  /* Currently, we do not parse any pragma target by ourself,
     so just simply return false.  */
  return false;
}
4112
4113 static void
4114 nds32_option_override (void)
4115 {
4116 /* After all the command options have been parsed,
4117 we shall deal with some flags for changing compiler settings. */
4118
4119 /* At first, we check if we have to strictly
4120 set some flags based on ISA family. */
4121 if (TARGET_ISA_V2)
4122 {
4123 /* Under V2 ISA, we need to strictly disable TARGET_V3PUSH. */
4124 target_flags &= ~MASK_V3PUSH;
4125 }
4126 if (TARGET_ISA_V3)
4127 {
4128 /* If this is ARCH_V3J, we need to enable TARGET_REDUCED_REGS. */
4129 if (nds32_arch_option == ARCH_V3J)
4130 target_flags |= MASK_REDUCED_REGS;
4131 }
4132 if (TARGET_ISA_V3M)
4133 {
4134 /* Under V3M ISA, we need to strictly enable TARGET_REDUCED_REGS. */
4135 target_flags |= MASK_REDUCED_REGS;
4136 /* Under V3M ISA, we need to strictly disable TARGET_EXT_PERF. */
4137 target_flags &= ~MASK_EXT_PERF;
4138 /* Under V3M ISA, we need to strictly disable TARGET_EXT_PERF2. */
4139 target_flags &= ~MASK_EXT_PERF2;
4140 /* Under V3M ISA, we need to strictly disable TARGET_EXT_STRING. */
4141 target_flags &= ~MASK_EXT_STRING;
4142
4143 if (flag_pic)
4144 error ("not support %<-fpic%> option for v3m toolchain");
4145 }
4146
4147 /* See if we are using reduced-set registers:
4148 $r0~$r5, $r6~$r10, $r15, $r28, $r29, $r30, $r31
4149 If so, we must forbid using $r11~$r14, $r16~$r27. */
4150 if (TARGET_REDUCED_REGS)
4151 {
4152 int r;
4153
4154 /* Prevent register allocator from
4155 choosing it as doing register allocation. */
4156 for (r = 11; r <= 14; r++)
4157 fixed_regs[r] = call_used_regs[r] = 1;
4158 for (r = 16; r <= 27; r++)
4159 fixed_regs[r] = call_used_regs[r] = 1;
4160 }
4161
4162 /* See if user explicitly would like to use fp-as-gp optimization.
4163 If so, we must prevent $fp from being allocated
4164 during register allocation. */
4165 if (TARGET_FORCE_FP_AS_GP)
4166 fixed_regs[FP_REGNUM] = call_used_regs[FP_REGNUM] = 1;
4167
4168 if (!TARGET_16_BIT)
4169 {
4170 /* Under no 16 bit ISA, we need to strictly disable TARGET_V3PUSH. */
4171 target_flags &= ~MASK_V3PUSH;
4172 }
4173
4174 if (TARGET_HARD_FLOAT && !(TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE))
4175 {
4176 if (nds32_arch_option == ARCH_V3S || nds32_arch_option == ARCH_V3F)
4177 error ("Disable FPU ISA, "
4178 "the ABI option must be enable '-mfloat-abi=soft'");
4179 else
4180 error ("'-mabi=2fp+' option only support when FPU available, "
4181 "must be enable '-mext-fpu-sp' or '-mext-fpu-dp'");
4182 }
4183
4184 nds32_init_rtx_costs ();
4185
4186 nds32_register_passes ();
4187 }
4188
4189 \f
4190 /* Miscellaneous Parameters. */
4191
4192 static rtx_insn *
4193 nds32_md_asm_adjust (vec<rtx> &outputs ATTRIBUTE_UNUSED,
4194 vec<rtx> &inputs ATTRIBUTE_UNUSED,
4195 vec<const char *> &constraints ATTRIBUTE_UNUSED,
4196 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
4197 {
4198 if (!flag_inline_asm_r15)
4199 {
4200 clobbers.safe_push (gen_rtx_REG (SImode, TA_REGNUM));
4201 SET_HARD_REG_BIT (clobbered_regs, TA_REGNUM);
4202 }
4203 return NULL;
4204 }
4205
/* Implement TARGET_INIT_BUILTINS.  The real work is done by
   nds32_init_builtins_impl in nds32-intrinsic.c.  */
static void
nds32_init_builtins (void)
{
  nds32_init_builtins_impl ();
}
4211
/* Implement TARGET_BUILTIN_DECL.  Return the decl for built-in CODE;
   INITIALIZE_P is forwarded unchanged to the implementation.  */
static tree
nds32_builtin_decl (unsigned code, bool initialize_p)
{
  /* Implement in nds32-intrinsic.c.  */
  return nds32_builtin_decl_impl (code, initialize_p);
}
4218
/* Implement TARGET_EXPAND_BUILTIN.  Expand the CALL_EXPR EXP into rtl;
   all arguments are forwarded to the implementation in
   nds32-intrinsic.c.  */
static rtx
nds32_expand_builtin (tree exp,
		      rtx target,
		      rtx subtarget,
		      machine_mode mode,
		      int ignore)
{
  return nds32_expand_builtin_impl (exp, target, subtarget, mode, ignore);
}
4228
/* Implement TARGET_INIT_LIBFUNCS.  For the Linux ABI, route atomic
   operations through the word-sized __sync_* library functions.  */
static void
nds32_init_libfuncs (void)
{
  if (TARGET_LINUX_ABI)
    init_sync_libfuncs (UNITS_PER_WORD);
}
4236
4237 /* ------------------------------------------------------------------------ */
4238
4239 /* PART 4: Implemet extern function definitions,
4240 the prototype is in nds32-protos.h. */
4241 \f
4242 /* Run-time Target Specification. */
4243
/* Define target-specific preprocessor macros (and assertions) that
   describe the selected nds32 configuration: ISA family, FPU extensions
   and register-bank configuration, endianness, ABI, and the various
   optional instruction-set extensions.  Called from the C-family
   front-end hooks with the active cpp_reader PFILE.  */
void
nds32_cpu_cpp_builtins(struct cpp_reader *pfile)
{
#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)
  builtin_define ("__nds32__");
  builtin_define ("__NDS32__");

  /* We need to provide builtin macro to describe the size of
     each vector for interrupt handler under elf toolchain.  */
  if (!TARGET_LINUX_ABI)
    {
      if (TARGET_ISR_VECTOR_SIZE_4_BYTE)
	builtin_define ("__NDS32_ISR_VECTOR_SIZE_4__");
      else
	builtin_define ("__NDS32_ISR_VECTOR_SIZE_16__");
    }

  if (TARGET_HARD_FLOAT)
    builtin_define ("__NDS32_ABI_2FP_PLUS__");
  else
    builtin_define ("__NDS32_ABI_2__");

  if (TARGET_ISA_V2)
    builtin_define ("__NDS32_ISA_V2__");
  if (TARGET_ISA_V3)
    builtin_define ("__NDS32_ISA_V3__");
  if (TARGET_ISA_V3M)
    builtin_define ("__NDS32_ISA_V3M__");

  if (TARGET_FPU_SINGLE)
    builtin_define ("__NDS32_EXT_FPU_SP__");
  if (TARGET_FPU_DOUBLE)
    builtin_define ("__NDS32_EXT_FPU_DP__");

  if (TARGET_EXT_FPU_FMA)
    builtin_define ("__NDS32_EXT_FPU_FMA__");
  if (NDS32_EXT_FPU_DOT_E)
    builtin_define ("__NDS32_EXT_FPU_DOT_E__");
  if (TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
    {
      /* Map nds32_fp_regnum onto one of the four FPU register-bank
	 configuration macros.  */
      switch (nds32_fp_regnum)
	{
	case 0:
	case 4:
	  builtin_define ("__NDS32_EXT_FPU_CONFIG_0__");
	  break;
	case 1:
	case 5:
	  builtin_define ("__NDS32_EXT_FPU_CONFIG_1__");
	  break;
	case 2:
	case 6:
	  builtin_define ("__NDS32_EXT_FPU_CONFIG_2__");
	  break;
	case 3:
	case 7:
	  builtin_define ("__NDS32_EXT_FPU_CONFIG_3__");
	  break;
	default:
	  /* Any other nds32_fp_regnum value is unexpected here.  */
	  abort ();
	}
    }

  if (TARGET_BIG_ENDIAN)
    builtin_define ("__NDS32_EB__");
  else
    builtin_define ("__NDS32_EL__");

  if (TARGET_REDUCED_REGS)
    builtin_define ("__NDS32_REDUCED_REGS__");
  if (TARGET_CMOV)
    builtin_define ("__NDS32_CMOV__");
  if (TARGET_EXT_PERF)
    builtin_define ("__NDS32_EXT_PERF__");
  if (TARGET_EXT_PERF2)
    builtin_define ("__NDS32_EXT_PERF2__");
  if (TARGET_EXT_STRING)
    builtin_define ("__NDS32_EXT_STRING__");
  if (TARGET_16_BIT)
    builtin_define ("__NDS32_16_BIT__");
  if (TARGET_GP_DIRECT)
    builtin_define ("__NDS32_GP_DIRECT__");
  if (TARGET_VH)
    builtin_define ("__NDS32_VH__");
  if (NDS32_EXT_DSP_P ())
    builtin_define ("__NDS32_EXT_DSP__");

  if (TARGET_BIG_ENDIAN)
    builtin_define ("__big_endian__");

  builtin_assert ("cpu=nds32");
  builtin_assert ("machine=nds32");

  /* Also provide the ABI macros without trailing underscores.  */
  if (TARGET_HARD_FLOAT)
    builtin_define ("__NDS32_ABI_2FP_PLUS");
  else
    builtin_define ("__NDS32_ABI_2");

#undef builtin_define
#undef builtin_assert
}
4346
4347 \f
4348 /* Defining Data Structures for Per-function Information. */
4349
/* Set up per-function machinery: install the hook that allocates and
   initializes the nds32-specific per-function data (cfun->machine).  */
void
nds32_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = nds32_init_machine_status;
}
4356
4357 \f
4358 /* Register Usage. */
4359
4360 /* -- Order of Allocation of Registers. */
4361
4362 void
4363 nds32_adjust_reg_alloc_order (void)
4364 {
4365 const int nds32_reg_alloc_order[] = REG_ALLOC_ORDER;
4366
4367 /* Copy the default register allocation order, which is designed
4368 to optimize for code size. */
4369 memcpy(reg_alloc_order, nds32_reg_alloc_order, sizeof (reg_alloc_order));
4370
4371 /* Adjust few register allocation order when optimizing for speed. */
4372 if (!optimize_size)
4373 {
4374 memcpy (reg_alloc_order, nds32_reg_alloc_order_for_speed,
4375 sizeof (nds32_reg_alloc_order_for_speed));
4376 }
4377 }
4378
4379 /* -- How Values Fit in Registers. */
4380
4381 static unsigned
4382 nds32_hard_regno_nregs (unsigned regno ATTRIBUTE_UNUSED,
4383 machine_mode mode)
4384 {
4385 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
4386 }
4387
4388 /* Implement TARGET_HARD_REGNO_MODE_OK. */
4389
4390 static bool
4391 nds32_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
4392 {
4393 if (regno >= FIRST_PSEUDO_REGISTER)
4394 return true;
4395
4396 if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE) && NDS32_IS_FPR_REGNUM (regno))
4397 {
4398 if (NDS32_IS_EXT_FPR_REGNUM(regno))
4399 return (NDS32_FPR_REGNO_OK_FOR_DOUBLE(regno) && (mode == DFmode));
4400 else if (mode == SFmode || mode == SImode)
4401 return NDS32_FPR_REGNO_OK_FOR_SINGLE (regno);
4402 else if (mode == DFmode)
4403 return NDS32_FPR_REGNO_OK_FOR_DOUBLE (regno);
4404
4405 return false;
4406 }
4407
4408 /* Restrict double-word quantities to even register pairs. */
4409 if (regno <= NDS32_LAST_GPR_REGNUM)
4410 return (targetm.hard_regno_nregs (regno, mode) == 1
4411 || !((regno) & 1));
4412
4413 return false;
4414 }
4415
4416 /* Implement TARGET_MODES_TIEABLE_P. We can use general registers to
4417 tie QI/HI/SI modes together. */
4418
4419 static bool
4420 nds32_modes_tieable_p (machine_mode mode1, machine_mode mode2)
4421 {
4422 if ((GET_MODE_CLASS (mode1) == MODE_INT
4423 && GET_MODE_CLASS (mode2) == MODE_INT)
4424 && GET_MODE_SIZE (mode1) <= UNITS_PER_WORD
4425 && GET_MODE_SIZE (mode2) <= UNITS_PER_WORD)
4426 return true;
4427
4428 if (GET_MODE_SIZE (mode1) == GET_MODE_SIZE (mode2))
4429 {
4430 if ((TARGET_FPU_SINGLE && !TARGET_FPU_DOUBLE)
4431 && (mode1 == DFmode || mode2 == DFmode))
4432 return false;
4433 else
4434 return true;
4435 }
4436
4437 return false;
4438 }
4439 \f
4440 /* Register Classes. */
4441
4442 enum reg_class
4443 nds32_regno_reg_class (int regno)
4444 {
4445 /* Refer to nds32.h for more register class details. */
4446
4447 if (regno >= 0 && regno <= 7)
4448 return LOW_REGS;
4449 else if (regno >= 8 && regno <= 11)
4450 return MIDDLE_REGS;
4451 else if (regno >= 12 && regno <= 14)
4452 return HIGH_REGS;
4453 else if (regno == 15)
4454 return R15_TA_REG;
4455 else if (regno >= 16 && regno <= 19)
4456 return MIDDLE_REGS;
4457 else if (regno >= 20 && regno <= 31)
4458 return HIGH_REGS;
4459 else if (regno == 32 || regno == 33)
4460 {
4461 /* $SFP and $AP is FRAME_REGS in fact, However prevent IRA don't
4462 know how to allocate register for $SFP and $AP, just tell IRA they
4463 are GENERAL_REGS, and ARM do this hack too. */
4464 return GENERAL_REGS;
4465 }
4466 else if (regno >= 34 && regno <= 97)
4467 return FP_REGS;
4468 else
4469 return NO_REGS;
4470 }
4471
4472 \f
4473 /* Stack Layout and Calling Conventions. */
4474
4475 /* -- Basic Stack Layout. */
4476
4477 rtx
4478 nds32_dynamic_chain_address (rtx frameaddr)
4479 {
4480 if (TARGET_V3PUSH)
4481 {
4482 /* If -mv3push is specified, we push $fp, $gp, and $lp into stack.
4483 We can access dynamic chain address from stack by [$fp - 12]. */
4484 return plus_constant (Pmode, frameaddr, -12);
4485 }
4486 else
4487 {
4488 /* For general case we push $fp and $lp into stack at prologue.
4489 We can access dynamic chain address from stack by [$fp - 8]. */
4490 return plus_constant (Pmode, frameaddr, -8);
4491 }
4492 }
4493
4494 rtx
4495 nds32_return_addr_rtx (int count,
4496 rtx frameaddr)
4497 {
4498 int offset;
4499 rtx addr;
4500
4501 if (count != 0)
4502 {
4503 /* In nds32 ABI design, we can expect that $lp is always available
4504 from stack by [$fp - 4] location. */
4505 offset = -4;
4506 addr = plus_constant (Pmode, frameaddr, offset);
4507 addr = memory_address (Pmode, addr);
4508
4509 return gen_rtx_MEM (Pmode, addr);
4510 }
4511
4512 /* If count == 0, it means we are at current frame,
4513 the return address is $r30 ($lp). */
4514 return get_hard_reg_initial_val (Pmode, LP_REGNUM);
4515 }
4516
4517 /* -- Eliminating Frame Pointer and Arg Pointer. */
4518
/* Compute the constant offset between FROM_REG and TO_REG for register
   elimination.  Only the register pairs listed below are valid; any
   other combination aborts.  */
HOST_WIDE_INT
nds32_initial_elimination_offset (unsigned int from_reg, unsigned int to_reg)
{
  HOST_WIDE_INT offset;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Remember to consider
     cfun->machine->callee_saved_area_gpr_padding_bytes and
     cfun->machine->eh_return_data_regs_size
     when calculating offset.  */
  if (from_reg == ARG_POINTER_REGNUM && to_reg == STACK_POINTER_REGNUM)
    {
      /* $ap sits above the entire frame: sum every frame component.  */
      offset = (cfun->machine->fp_size
	        + cfun->machine->gp_size
		+ cfun->machine->lp_size
		+ cfun->machine->callee_saved_gpr_regs_size
		+ cfun->machine->callee_saved_area_gpr_padding_bytes
		+ cfun->machine->callee_saved_fpr_regs_size
		+ cfun->machine->eh_return_data_regs_size
		+ cfun->machine->local_size
		+ cfun->machine->out_args_size);
    }
  else if (from_reg == ARG_POINTER_REGNUM
	   && to_reg == HARD_FRAME_POINTER_REGNUM)
    {
      /* $ap coincides with the hard frame pointer.  */
      offset = 0;
    }
  else if (from_reg == FRAME_POINTER_REGNUM
	   && to_reg == STACK_POINTER_REGNUM)
    {
      /* The soft frame pointer is above the locals and outgoing args.  */
      offset = (cfun->machine->local_size + cfun->machine->out_args_size);
    }
  else if (from_reg == FRAME_POINTER_REGNUM
	   && to_reg == HARD_FRAME_POINTER_REGNUM)
    {
      /* The saved-register area lies between the two frame pointers,
	 hence the negative offset.  */
      offset = (-1) * (cfun->machine->fp_size
		       + cfun->machine->gp_size
		       + cfun->machine->lp_size
		       + cfun->machine->callee_saved_gpr_regs_size
		       + cfun->machine->callee_saved_area_gpr_padding_bytes
		       + cfun->machine->callee_saved_fpr_regs_size
		       + cfun->machine->eh_return_data_regs_size);
    }
  else
    {
      gcc_unreachable ();
    }

  return offset;
}
4572
4573 /* -- Passing Arguments in Registers. */
4574
/* Initialize the argument-passing state CUM before scanning a call's
   arguments.  Only the GPR/FPR offsets are tracked; the remaining
   parameters of the standard hook signature are unused.  */
void
nds32_init_cumulative_args (CUMULATIVE_ARGS *cum,
			    tree fntype ATTRIBUTE_UNUSED,
			    rtx libname ATTRIBUTE_UNUSED,
			    tree fndecl ATTRIBUTE_UNUSED,
			    int n_named_args ATTRIBUTE_UNUSED)
{
  /* Initial available registers.  The values are offset against
     NDS32_GPR_ARG_FIRST_REGNUM and NDS32_FPR_ARG_FIRST_REGNUM
     for passing arguments.  */
  cum->gpr_offset = 0;
  cum->fpr_offset = 0;
}
4588
4589 /* -- Function Entry and Exit. */
4590
/* Function for normal multiple push prologue.  Emits, in order:
   varargs register save, the multiple-push of $fp/$gp/$lp and
   callee-saved GPRs, the EH data register save, the $fp adjustment,
   the callee-saved FPR save, and the final $sp adjustment.  */
void
nds32_expand_prologue (void)
{
  int fp_adjust;
  int sp_adjust;
  unsigned Rb, Re;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Check frame_pointer_needed again to prevent fp is need after reload.  */
  if (frame_pointer_needed)
    cfun->machine->fp_as_gp_p = false;

  /* If this is a variadic function, first we need to push argument
     registers that hold the unnamed argument value.  */
  if (cfun->machine->va_args_size != 0)
    {
      Rb = cfun->machine->va_args_first_regno;
      Re = cfun->machine->va_args_last_regno;
      /* No need to push $fp, $gp, or $lp.  */
      nds32_emit_stack_push_multiple (Rb, Re, false, false, false, true);

      /* We may also need to adjust stack pointer for padding bytes
	 because varargs may cause $sp not 8-byte aligned.  */
      if (cfun->machine->va_args_area_padding_bytes)
	{
	  /* Generate sp adjustment instruction.  */
	  sp_adjust = cfun->machine->va_args_area_padding_bytes;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   -1 * sp_adjust);
	}
    }

  /* If the function is 'naked',
     we do not have to generate prologue code fragment.  */
  if (cfun->machine->naked_p && !flag_pic)
    return;

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = cfun->machine->callee_saved_first_gpr_regno;
  Re = cfun->machine->callee_saved_last_gpr_regno;

  /* If $fp, $gp, $lp, and all callee-save registers are NOT required
     to be saved, we don't have to create multiple push instruction.
     Otherwise, a multiple push instruction is needed.  */
  if (!(Rb == SP_REGNUM && Re == SP_REGNUM
	&& cfun->machine->fp_size == 0
	&& cfun->machine->gp_size == 0
	&& cfun->machine->lp_size == 0))
    {
      /* Create multiple push instruction rtx.  */
      nds32_emit_stack_push_multiple (
	Rb, Re,
	cfun->machine->fp_size, cfun->machine->gp_size, cfun->machine->lp_size,
	false);
    }

  /* Save eh data registers.  */
  if (cfun->machine->use_eh_return_p)
    {
      Rb = cfun->machine->eh_return_data_first_regno;
      Re = cfun->machine->eh_return_data_last_regno;

      /* No need to push $fp, $gp, or $lp.
	 Also, this is not variadic arguments push.  */
      nds32_emit_stack_push_multiple (Rb, Re, false, false, false, false);
    }

  /* Check frame_pointer_needed to see
     if we shall emit fp adjustment instruction.  */
  if (frame_pointer_needed)
    {
      /* adjust $fp = $sp + ($fp size) + ($gp size) + ($lp size)
			  + (4 * callee-saved-registers)
			  + (4 * exception-handling-data-registers)
	 Note: No need to adjust
	       cfun->machine->callee_saved_area_gpr_padding_bytes,
	       because, at this point, stack pointer is just
	       at the position after push instruction.  */
      fp_adjust = cfun->machine->fp_size
		  + cfun->machine->gp_size
		  + cfun->machine->lp_size
		  + cfun->machine->callee_saved_gpr_regs_size
		  + cfun->machine->eh_return_data_regs_size;

      nds32_emit_adjust_frame (hard_frame_pointer_rtx,
			       stack_pointer_rtx,
			       fp_adjust);
    }

  /* Save fpu registers.  */
  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
    {
      /* When $sp moved to bottom of stack, we need to check whether
	 the range of offset in the FPU instruction.  */
      int fpr_offset = cfun->machine->local_size
		       + cfun->machine->out_args_size
		       + cfun->machine->callee_saved_fpr_regs_size;

      /* Check FPU instruction offset imm14s.  */
      if (!satisfies_constraint_Is14 (GEN_INT (fpr_offset)))
	{
	  int fpr_space = cfun->machine->callee_saved_area_gpr_padding_bytes
			  + cfun->machine->callee_saved_fpr_regs_size;

	  /* Save fpu registers, need to allocate stack space
	     for fpu callee registers.  And now $sp position
	     on callee saved fpr registers.  */
	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   -1 * fpr_space);

	  /* Emit fpu store instruction, using [$sp + offset] store
	     fpu registers.  */
	  nds32_emit_push_fpr_callee_saved (0);

	  /* Adjust $sp = $sp - local_size - out_args_size.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size;

	  /* Allocate stack space for local size and out args size.  */
	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   -1 * sp_adjust);
	}
      else
	{
	  /* Offset range in Is14, so $sp moved to bottom of stack.  */

	  /* Adjust $sp = $sp - local_size - out_args_size
			      - callee_saved_area_gpr_padding_bytes
			      - callee_saved_fpr_regs_size.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size
		      + cfun->machine->callee_saved_area_gpr_padding_bytes
		      + cfun->machine->callee_saved_fpr_regs_size;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   -1 * sp_adjust);

	  /* Emit fpu store instruction, using [$sp + offset] store
	     fpu registers.  */
	  int fpr_position = cfun->machine->out_args_size
			     + cfun->machine->local_size;
	  nds32_emit_push_fpr_callee_saved (fpr_position);
	}
    }
  else
    {
      /* Adjust $sp = $sp - local_size - out_args_size
			  - callee_saved_area_gpr_padding_bytes.  */
      sp_adjust = cfun->machine->local_size
		  + cfun->machine->out_args_size
		  + cfun->machine->callee_saved_area_gpr_padding_bytes;

      /* sp_adjust value may be out of range of the addi instruction,
	 create alternative add behavior with TA_REGNUM if necessary,
	 using NEGATIVE value to tell that we are decreasing address.  */
      nds32_emit_adjust_frame (stack_pointer_rtx,
			       stack_pointer_rtx,
			       -1 * sp_adjust);
    }

  /* Emit gp setup instructions for -fpic.  */
  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    nds32_emit_load_gp ();

  /* If user applies -mno-sched-prolog-epilog option,
     we need to prevent instructions of function body from being
     scheduled with stack adjustment in prologue.  */
  if (!flag_sched_prolog_epilog)
    emit_insn (gen_blockage ());
}
4770
/* Function for normal multiple pop epilogue.  Mirrors
   nds32_expand_prologue: restores FPRs, EH data registers and
   callee-saved GPRs, undoes the varargs/eh_return stack adjustments,
   and emits the return unless SIBCALL_P.  */
void
nds32_expand_epilogue (bool sibcall_p)
{
  int sp_adjust;
  unsigned Rb, Re;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* If user applies -mno-sched-prolog-epilog option,
     we need to prevent instructions of function body from being
     scheduled with stack adjustment in epilogue.  */
  if (!flag_sched_prolog_epilog)
    emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.
     However, if this function is also a variadic function,
     we need to create adjust stack pointer before 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* If this is a variadic function, we do not have to restore argument
	 registers but need to adjust stack pointer back to previous stack
	 frame location before return.  */
      if (cfun->machine->va_args_size != 0)
	{
	  /* Generate sp adjustment instruction.
	     We need to consider padding bytes here.  */
	  sp_adjust = cfun->machine->va_args_size
		      + cfun->machine->va_args_area_padding_bytes;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   sp_adjust);
	}

      /* Generate return instruction by using 'return_internal' pattern.
	 Make sure this instruction is after gen_blockage().  */
      if (!sibcall_p)
	{
	  /* We need to further check attributes to determine whether
	     there should be return instruction at epilogue.
	     If the attribute naked exists but -mno-ret-in-naked-func
	     is issued, there is NO need to generate return instruction.  */
	  if (cfun->machine->attr_naked_p && !flag_ret_in_naked_func)
	    return;

	  emit_jump_insn (gen_return_internal ());
	}
      return;
    }

  if (frame_pointer_needed)
    {
      /* Restore fpu registers.  */
      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  int gpr_padding = cfun->machine->callee_saved_area_gpr_padding_bytes;

	  /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
			      - (4 * callee-saved-registers)
			      - (4 * exception-handling-data-registers)
			      - (4 * callee-saved-gpr-registers padding byte)
			      - (4 * callee-saved-fpr-registers)
	     Note: we want to adjust stack pointer
		   to the position for callee-saved fpr register,
		   And restore fpu register use .bi instruction to adjust $sp
		   from callee-saved fpr register to pop instruction.  */
	  sp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size
		      + cfun->machine->eh_return_data_regs_size
		      + cfun->machine->callee_saved_area_gpr_padding_bytes
		      + cfun->machine->callee_saved_fpr_regs_size;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   hard_frame_pointer_rtx,
				   -1 * sp_adjust);

	  /* Emit fpu load instruction, using .bi instruction
	     load fpu registers.  */
	  nds32_emit_pop_fpr_callee_saved (gpr_padding);
	}
      else
	{
	  /* adjust $sp = $fp - ($fp size) - ($gp size) - ($lp size)
			      - (4 * callee-saved-registers)
			      - (4 * exception-handling-data-registers)
	     Note: No need to adjust
		   cfun->machine->callee_saved_area_gpr_padding_bytes,
		   because we want to adjust stack pointer
		   to the position for pop instruction.  */
	  sp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size
		      + cfun->machine->eh_return_data_regs_size;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   hard_frame_pointer_rtx,
				   -1 * sp_adjust);
	}
    }
  else
    {
      /* Restore fpu registers.  */
      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  int gpr_padding = cfun->machine->callee_saved_area_gpr_padding_bytes;

	  /* Adjust $sp = $sp + local_size + out_args_size.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   sp_adjust);

	  /* Emit fpu load instruction, using .bi instruction
	     load fpu registers, and adjust $sp from callee-saved fpr register
	     to callee-saved gpr register.  */
	  nds32_emit_pop_fpr_callee_saved (gpr_padding);
	}
      else
	{
	  /* If frame pointer is NOT needed,
	     we cannot calculate the sp adjustment from frame pointer.
	     Instead, we calculate the adjustment by local_size,
	     out_args_size, and callee_saved_area_gpr_padding_bytes.
	     Notice that such sp adjustment value may be out of range,
	     so we have to deal with it as well.  */

	  /* Adjust $sp = $sp + local_size + out_args_size
			      + callee_saved_area_gpr_padding_bytes.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size
		      + cfun->machine->callee_saved_area_gpr_padding_bytes;

	  nds32_emit_adjust_frame (stack_pointer_rtx,
				   stack_pointer_rtx,
				   sp_adjust);
	}
    }

  /* Restore eh data registers.  */
  if (cfun->machine->use_eh_return_p)
    {
      Rb = cfun->machine->eh_return_data_first_regno;
      Re = cfun->machine->eh_return_data_last_regno;

      /* No need to pop $fp, $gp, or $lp.  */
      nds32_emit_stack_pop_multiple (Rb, Re, false, false, false);
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = cfun->machine->callee_saved_first_gpr_regno;
  Re = cfun->machine->callee_saved_last_gpr_regno;

  /* If $fp, $gp, $lp, and all callee-save registers are NOT required
     to be saved, we don't have to create multiple pop instruction.
     Otherwise, a multiple pop instruction is needed.  */
  if (!(Rb == SP_REGNUM && Re == SP_REGNUM
	&& cfun->machine->fp_size == 0
	&& cfun->machine->gp_size == 0
	&& cfun->machine->lp_size == 0))
    {
      /* Create multiple pop instruction rtx.  */
      nds32_emit_stack_pop_multiple (
	Rb, Re,
	cfun->machine->fp_size, cfun->machine->gp_size, cfun->machine->lp_size);
    }

  /* If this is a variadic function, we do not have to restore argument
     registers but need to adjust stack pointer back to previous stack
     frame location before return.  */
  if (cfun->machine->va_args_size != 0)
    {
      /* Generate sp adjustment instruction.
	 We need to consider padding bytes here.  */
      sp_adjust = cfun->machine->va_args_size
		  + cfun->machine->va_args_area_padding_bytes;

      nds32_emit_adjust_frame (stack_pointer_rtx,
			       stack_pointer_rtx,
			       sp_adjust);
    }

  /* If this function uses __builtin_eh_return, make stack adjustment
     for exception handler.  */
  if (cfun->machine->use_eh_return_p)
    {
      /* We need to unwind the stack by the offset computed by
	 EH_RETURN_STACKADJ_RTX.  However, at this point the CFA is
	 based on SP.  Ideally we would update the SP and define the
	 CFA along the lines of:

	 SP = SP + EH_RETURN_STACKADJ_RTX
	 (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)

	 However the dwarf emitter only understands a constant
	 register offset.

	 The solution chosen here is to use the otherwise-free
	 $ta ($r15) as a temporary register to hold the current SP
	 value.  The CFA is described using $ta then SP is modified.  */

      rtx ta_reg;
      rtx insn;

      ta_reg = gen_rtx_REG (SImode, TA_REGNUM);

      insn = emit_move_insn (ta_reg, stack_pointer_rtx);
      add_reg_note (insn, REG_CFA_DEF_CFA, ta_reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     EH_RETURN_STACKADJ_RTX));

      /* Ensure the assignment to $ta does not get optimized away.  */
      emit_use (ta_reg);
    }

  /* Generate return instruction.  */
  if (!sibcall_p)
    emit_jump_insn (gen_return_internal ());
}
5001
/* Function for v3push prologue.  Uses the 'push25' instruction, which
   pushes $fp/$gp/$lp plus callee-saved GPRs and can additionally
   subtract an imm8u (8-byte-aligned) amount from $sp in one insn;
   falls back to a separate $sp adjustment when the amount does not
   fit that immediate.  */
void
nds32_expand_prologue_v3push (void)
{
  int fp_adjust;
  int sp_adjust;
  int fpr_space = 0;
  unsigned Rb, Re;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* push25 always stores $fp, so mark it live when any GPR is saved.  */
  if (cfun->machine->callee_saved_gpr_regs_size > 0)
    df_set_regs_ever_live (FP_REGNUM, 1);

  /* Check frame_pointer_needed again to prevent fp is need after reload.  */
  if (frame_pointer_needed)
    cfun->machine->fp_as_gp_p = false;

  /* If the function is 'naked',
     we do not have to generate prologue code fragment.  */
  if (cfun->machine->naked_p && !flag_pic)
    return;

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = cfun->machine->callee_saved_first_gpr_regno;
  Re = cfun->machine->callee_saved_last_gpr_regno;

  /* Calculate sp_adjust first to test if 'push25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
	      + cfun->machine->out_args_size
	      + cfun->machine->callee_saved_area_gpr_padding_bytes
	      + cfun->machine->callee_saved_fpr_regs_size;

  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
    {
      /* We can use 'push25 Re,imm8u'.  */

      /* nds32_emit_stack_v3push(last_regno, sp_adjust),
	 the pattern 'stack_v3push' is implemented in nds32.md.  */
      nds32_emit_stack_v3push (Rb, Re, sp_adjust);

      /* Save fpu registers.  */
      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  /* Calculate fpr position.  */
	  int fpr_position = cfun->machine->local_size
			     + cfun->machine->out_args_size;
	  /* Emit fpu store instruction, using [$sp + offset] store
	     fpu registers.  */
	  nds32_emit_push_fpr_callee_saved (fpr_position);
	}

      /* Check frame_pointer_needed to see
	 if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
	{
	  /* adjust $fp = $sp + 4 ($fp size)
			      + 4 ($gp size)
			      + 4 ($lp size)
			      + (4 * n) (callee-saved registers)
			      + sp_adjust ('push25 Re,imm8u')
	     Note: Since we use 'push25 Re,imm8u',
		   the position of stack pointer is further
		   changed after push instruction.
		   Hence, we need to take sp_adjust value
		   into consideration.  */
	  fp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size
		      + sp_adjust;

	  nds32_emit_adjust_frame (hard_frame_pointer_rtx,
				   stack_pointer_rtx,
				   fp_adjust);
	}
    }
  else
    {
      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  /* Calculate fpr space.  */
	  fpr_space = cfun->machine->callee_saved_area_gpr_padding_bytes
		      + cfun->machine->callee_saved_fpr_regs_size;

	  /* We have to use 'push25 Re, fpr_space', to pre-allocate
	     callee saved fpr registers space.  */
	  nds32_emit_stack_v3push (Rb, Re, fpr_space);
	  nds32_emit_push_fpr_callee_saved (0);
	}
      else
	{
	  /* We have to use 'push25 Re,0' and
	     expand one more instruction to adjust $sp later.  */

	  /* nds32_emit_stack_v3push(last_regno, sp_adjust),
	     the pattern 'stack_v3push' is implemented in nds32.md.  */
	  nds32_emit_stack_v3push (Rb, Re, 0);
	}

      /* Check frame_pointer_needed to see
	 if we shall emit fp adjustment instruction.  */
      if (frame_pointer_needed)
	{
	  /* adjust $fp = $sp + 4 ($fp size)
			      + 4 ($gp size)
			      + 4 ($lp size)
			      + (4 * n) (callee-saved registers)
	     Note: Since we use 'push25 Re,0',
		   the stack pointer is just at the position
		   after push instruction.
		   No need to take sp_adjust into consideration.  */
	  fp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size;

	  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	    {
	      /* We use 'push25 Re, fpr_space', the $sp is
		 on callee saved fpr position, so need to consider
		 fpr space.  */
	      fp_adjust = fp_adjust + fpr_space;
	    }

	  nds32_emit_adjust_frame (hard_frame_pointer_rtx,
				   stack_pointer_rtx,
				   fp_adjust);
	}

      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  /* We use 'push25 Re, fpr_space',
	     the $sp is on callee saved fpr position,
	     no need to consider fpr space.  */
	  sp_adjust = sp_adjust - fpr_space;
	}

      /* Because we use 'push25 Re,0',
	 we need to expand one more instruction to adjust $sp.
	 using NEGATIVE value to tell that we are decreasing address.  */
      nds32_emit_adjust_frame (stack_pointer_rtx,
			       stack_pointer_rtx,
			       -1 * sp_adjust);
    }

  /* Emit gp setup instructions for -fpic.  */
  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    nds32_emit_load_gp ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());
}
5160
/* Function for v3pop epilogue.  Emit the epilogue RTL for a function
   whose prologue used 'push25' (v3push): restore callee-saved FPU
   registers if any, restore GPRs and deallocate the frame with
   'pop25', then emit the return.  SIBCALL_P is true when expanding
   the epilogue before a sibling call, in which case no return
   instruction is emitted.  */
void
nds32_expand_epilogue_v3pop (bool sibcall_p)
{
  int sp_adjust;
  unsigned Rb, Re;

  /* Compute and setup stack frame size.
     The result will be in cfun->machine.  */
  nds32_compute_stack_frame ();

  /* Prevent the instruction scheduler from
     moving instructions across the boundary.  */
  emit_insn (gen_blockage ());

  /* If the function is 'naked', we do not have to generate
     epilogue code fragment BUT 'ret' instruction.  */
  if (cfun->machine->naked_p)
    {
      /* Generate return instruction by using 'return_internal' pattern.
	 Make sure this instruction is after gen_blockage ().
	 First we need to check this is a function without sibling call.  */
      if (!sibcall_p)
	{
	  /* We need to further check attributes to determine whether
	     there should be return instruction at epilogue.
	     If the attribute naked exists but -mno-ret-in-naked-func
	     is issued, there is NO need to generate return instruction.  */
	  if (cfun->machine->attr_naked_p && !flag_ret_in_naked_func)
	    return;

	  emit_jump_insn (gen_return_internal ());
	}
      return;
    }

  /* Get callee_first_regno and callee_last_regno.  */
  Rb = cfun->machine->callee_saved_first_gpr_regno;
  Re = cfun->machine->callee_saved_last_gpr_regno;

  /* Calculate sp_adjust first to test if 'pop25 Re,imm8u' is available,
     where imm8u has to be 8-byte alignment.  */
  sp_adjust = cfun->machine->local_size
	      + cfun->machine->out_args_size
	      + cfun->machine->callee_saved_area_gpr_padding_bytes
	      + cfun->machine->callee_saved_fpr_regs_size;

  /* We have to consider alloca issue as well.
     If the function does call alloca(), the stack pointer is not fixed.
     In that case, we cannot use 'pop25 Re,imm8u' directly.
     We have to calculate stack pointer from frame pointer
     and then use 'pop25 Re,0'.
     Of course, the frame_pointer_needed should be nonzero
     if the function calls alloca().  */
  if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
      && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
      && !cfun->calls_alloca)
    {
      /* Restore fpu registers.  */
      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  /* FPU save area sits above local and outgoing-argument
	     space, so the restore offset is their sum.  */
	  int fpr_position = cfun->machine->local_size
			     + cfun->machine->out_args_size;
	  /* Emit fpu load instruction, using [$sp + offset] restore
	     fpu registers.  */
	  nds32_emit_v3pop_fpr_callee_saved (fpr_position);
	}

      /* We can use 'pop25 Re,imm8u'.  */

      /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
	 the pattern 'stack_v3pop' is implemented in nds32.md.  */
      nds32_emit_stack_v3pop (Rb, Re, sp_adjust);
    }
  else
    {
      /* We have to use 'pop25 Re,0', and prior to it,
	 we must expand one more instruction to adjust $sp.  */

      if (frame_pointer_needed)
	{
	  /* adjust $sp = $fp - 4 ($fp size)
			      - 4 ($gp size)
			      - 4 ($lp size)
			      - (4 * n) (callee-saved registers)
	     Note: No need to adjust
		   cfun->machine->callee_saved_area_gpr_padding_bytes,
		   because we want to adjust stack pointer
		   to the position for pop instruction.  */
	  sp_adjust = cfun->machine->fp_size
		      + cfun->machine->gp_size
		      + cfun->machine->lp_size
		      + cfun->machine->callee_saved_gpr_regs_size;

	  /* Restore fpu registers.  */
	  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	    {
	      /* Set $sp to callee saved fpr position, we need to restore
		 fpr registers.  */
	      sp_adjust = sp_adjust
			  + cfun->machine->callee_saved_area_gpr_padding_bytes
			  + cfun->machine->callee_saved_fpr_regs_size;

	      nds32_emit_adjust_frame (stack_pointer_rtx,
				       hard_frame_pointer_rtx,
				       -1 * sp_adjust);

	      /* Emit fpu load instruction, using [$sp + offset] restore
		 fpu registers.  */
	      nds32_emit_v3pop_fpr_callee_saved (0);
	    }
	  else
	    {
	      nds32_emit_adjust_frame (stack_pointer_rtx,
				       hard_frame_pointer_rtx,
				       -1 * sp_adjust);
	    }
	}
      else
	{
	  /* If frame pointer is NOT needed,
	     we cannot calculate the sp adjustment from frame pointer.
	     Instead, we calculate the adjustment by local_size,
	     out_args_size, and callee_saved_area_padding_bytes.
	     Notice that such sp adjustment value may be out of range,
	     so we have to deal with it as well.  */

	  /* Adjust $sp = $sp + local_size + out_args_size
			      + callee_saved_area_gpr_padding_bytes
			      + callee_saved_fpr_regs_size.  */
	  sp_adjust = cfun->machine->local_size
		      + cfun->machine->out_args_size
		      + cfun->machine->callee_saved_area_gpr_padding_bytes
		      + cfun->machine->callee_saved_fpr_regs_size;

	  /* Restore fpu registers.  */
	  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	    {
	      /* Set $sp to callee saved fpr position, we need to restore
		 fpr registers.  */
	      sp_adjust = sp_adjust
			  - cfun->machine->callee_saved_area_gpr_padding_bytes
			  - cfun->machine->callee_saved_fpr_regs_size;

	      nds32_emit_adjust_frame (stack_pointer_rtx,
				       stack_pointer_rtx,
				       sp_adjust);

	      /* Emit fpu load instruction, using [$sp + offset] restore
		 fpu registers.  */
	      nds32_emit_v3pop_fpr_callee_saved (0);
	    }
	  else
	    {
	      /* sp_adjust value may be out of range of the addi instruction,
		 create alternative add behavior with TA_REGNUM if necessary,
		 using POSITIVE value to tell that we are increasing
		 address.  */
	      nds32_emit_adjust_frame (stack_pointer_rtx,
				       stack_pointer_rtx,
				       sp_adjust);
	    }
	}

      if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	{
	  /* We have fpr need to restore, so $sp is set on callee saved fpr
	     position.  And we use 'pop25 Re, fpr_space' to adjust $sp.  */
	  int fpr_space = cfun->machine->callee_saved_area_gpr_padding_bytes
			  + cfun->machine->callee_saved_fpr_regs_size;
	  nds32_emit_stack_v3pop (Rb, Re, fpr_space);
	}
      else
	{
	  /* nds32_emit_stack_v3pop(last_regno, sp_adjust),
	     the pattern 'stack_v3pop' is implemented in nds32.md.  */
	  nds32_emit_stack_v3pop (Rb, Re, 0);
	}
    }
  /* Generate return instruction.  */
  emit_jump_insn (gen_pop25return ());
}
5343
5344 /* Return nonzero if this function is known to have a null epilogue.
5345 This allows the optimizer to omit jumps to jumps if no stack
5346 was created. */
5347 int
5348 nds32_can_use_return_insn (void)
5349 {
5350 int sp_adjust;
5351
5352 /* Prior to reloading, we can't tell how many registers must be saved.
5353 Thus we cannot determine whether this function has null epilogue. */
5354 if (!reload_completed)
5355 return 0;
5356
5357 /* If attribute 'naked' appears but -mno-ret-in-naked-func is used,
5358 we cannot use return instruction. */
5359 if (cfun->machine->attr_naked_p && !flag_ret_in_naked_func)
5360 return 0;
5361
5362 sp_adjust = cfun->machine->local_size
5363 + cfun->machine->out_args_size
5364 + cfun->machine->callee_saved_area_gpr_padding_bytes
5365 + cfun->machine->callee_saved_fpr_regs_size;
5366 if (!cfun->machine->fp_as_gp_p
5367 && satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
5368 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
5369 && !cfun->calls_alloca
5370 && NDS32_V3PUSH_AVAILABLE_P
5371 && !(TARGET_HARD_FLOAT
5372 && (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)))
5373 return 1;
5374
5375 /* If no stack was created, two conditions must be satisfied:
5376 1. This is a naked function.
5377 So there is no callee-saved, local size, or outgoing size.
5378 2. This is NOT a variadic function.
5379 So there is no pushing arguement registers into the stack. */
5380 return (cfun->machine->naked_p && (cfun->machine->va_args_size == 0));
5381 }
5382
5383 scalar_int_mode
5384 nds32_case_vector_shorten_mode (int min_offset, int max_offset,
5385 rtx body ATTRIBUTE_UNUSED)
5386 {
5387 if (min_offset < 0 || max_offset >= 0x2000)
5388 return SImode;
5389 else
5390 {
5391 /* The jump table maybe need to 2 byte alignment,
5392 so reserved 1 byte for check max_offset. */
5393 if (max_offset >= 0xff)
5394 return HImode;
5395 else
5396 return QImode;
5397 }
5398 }
5399
5400 /* ------------------------------------------------------------------------ */
5401
5402 /* Return alignment for the label. */
5403 int
5404 nds32_target_alignment (rtx_insn *label)
5405 {
5406 rtx_insn *insn;
5407
5408 if (!NDS32_ALIGN_P ())
5409 return 0;
5410
5411 insn = next_active_insn (label);
5412
5413 /* Always align to 4 byte when first instruction after label is jump
5414 instruction since length for that might changed, so let's always align
5415 it for make sure we don't lose any perfomance here. */
5416 if (insn == 0
5417 || (get_attr_length (insn) == 2
5418 && !JUMP_P (insn) && !CALL_P (insn)))
5419 return 0;
5420 else
5421 return 2;
5422 }
5423
5424 /* Return alignment for data. */
5425 unsigned int
5426 nds32_data_alignment (tree data,
5427 unsigned int basic_align)
5428 {
5429 if ((basic_align < BITS_PER_WORD)
5430 && (TREE_CODE (data) == ARRAY_TYPE
5431 || TREE_CODE (data) == UNION_TYPE
5432 || TREE_CODE (data) == RECORD_TYPE))
5433 return BITS_PER_WORD;
5434 else
5435 return basic_align;
5436 }
5437
5438 /* Return alignment for constant value. */
5439 static HOST_WIDE_INT
5440 nds32_constant_alignment (const_tree constant,
5441 HOST_WIDE_INT basic_align)
5442 {
5443 /* Make string literal and constant for constructor to word align. */
5444 if (((TREE_CODE (constant) == STRING_CST
5445 || TREE_CODE (constant) == CONSTRUCTOR
5446 || TREE_CODE (constant) == UNION_TYPE
5447 || TREE_CODE (constant) == RECORD_TYPE
5448 || TREE_CODE (constant) == ARRAY_TYPE)
5449 && basic_align < BITS_PER_WORD))
5450 return BITS_PER_WORD;
5451 else
5452 return basic_align;
5453 }
5454
5455 /* Return alignment for local variable. */
5456 unsigned int
5457 nds32_local_alignment (tree local ATTRIBUTE_UNUSED,
5458 unsigned int basic_align)
5459 {
5460 bool at_least_align_to_word = false;
5461 /* Make local array, struct and union at least align to word for make
5462 sure it can unroll memcpy when initialize by constant. */
5463 switch (TREE_CODE (local))
5464 {
5465 case ARRAY_TYPE:
5466 case RECORD_TYPE:
5467 case UNION_TYPE:
5468 at_least_align_to_word = true;
5469 break;
5470 default:
5471 at_least_align_to_word = false;
5472 break;
5473 }
5474 if (at_least_align_to_word
5475 && (basic_align < BITS_PER_WORD))
5476 return BITS_PER_WORD;
5477 else
5478 return basic_align;
5479 }
5480
5481 bool
5482 nds32_split_double_word_load_store_p(rtx *operands, bool load_p)
5483 {
5484 rtx mem = load_p ? operands[1] : operands[0];
5485 /* Do split at split2 if -O0 or schedule 2 not enable. */
5486 if (optimize == 0 || !flag_schedule_insns_after_reload)
5487 return !satisfies_constraint_Da (mem) || MEM_VOLATILE_P (mem);
5488
5489 /* Split double word load store after copy propgation. */
5490 if (current_pass == NULL)
5491 return false;
5492
5493 const char *pass_name = current_pass->name;
5494 if (pass_name && ((strcmp (pass_name, "split4") == 0)
5495 || (strcmp (pass_name, "split5") == 0)))
5496 return !satisfies_constraint_Da (mem) || MEM_VOLATILE_P (mem);
5497
5498 return false;
5499 }
5500
5501 static bool
5502 nds32_use_blocks_for_constant_p (machine_mode mode,
5503 const_rtx x ATTRIBUTE_UNUSED)
5504 {
5505 if ((TARGET_FPU_SINGLE || TARGET_FPU_DOUBLE)
5506 && (mode == DFmode || mode == SFmode))
5507 return true;
5508 else
5509 return false;
5510 }
5511
5512 /* ------------------------------------------------------------------------ */
5513
5514 /* PART 5: Initialize target hook structure and definitions. */
5515 \f
5516 /* Controlling the Compilation Driver. */
5517
5518 \f
5519 /* Run-time Target Specification. */
5520
5521 \f
5522 /* Defining Data Structures for Per-function Information. */
5523
5524 \f
5525 /* Storage Layout. */
5526
5527 #undef TARGET_PROMOTE_FUNCTION_MODE
5528 #define TARGET_PROMOTE_FUNCTION_MODE \
5529 default_promote_function_mode_always_promote
5530
5531 #undef TARGET_EXPAND_TO_RTL_HOOK
5532 #define TARGET_EXPAND_TO_RTL_HOOK nds32_expand_to_rtl_hook
5533
5534 #undef TARGET_CONSTANT_ALIGNMENT
5535 #define TARGET_CONSTANT_ALIGNMENT nds32_constant_alignment
5536
5537 \f
5538 /* Layout of Source Language Data Types. */
5539
5540 \f
5541 /* Register Usage. */
5542
5543 /* -- Basic Characteristics of Registers. */
5544
5545 #undef TARGET_CONDITIONAL_REGISTER_USAGE
5546 #define TARGET_CONDITIONAL_REGISTER_USAGE nds32_conditional_register_usage
5547
5548 /* -- Order of Allocation of Registers. */
5549
5550 /* -- How Values Fit in Registers. */
5551
5552 #undef TARGET_HARD_REGNO_NREGS
5553 #define TARGET_HARD_REGNO_NREGS nds32_hard_regno_nregs
5554
5555 #undef TARGET_HARD_REGNO_MODE_OK
5556 #define TARGET_HARD_REGNO_MODE_OK nds32_hard_regno_mode_ok
5557
5558 #undef TARGET_MODES_TIEABLE_P
5559 #define TARGET_MODES_TIEABLE_P nds32_modes_tieable_p
5560
5561 /* -- Handling Leaf Functions. */
5562
5563 /* -- Registers That Form a Stack. */
5564
5565 \f
5566 /* Register Classes. */
5567
5568 #undef TARGET_CLASS_MAX_NREGS
5569 #define TARGET_CLASS_MAX_NREGS nds32_class_max_nregs
5570
5571 #undef TARGET_REGISTER_PRIORITY
5572 #define TARGET_REGISTER_PRIORITY nds32_register_priority
5573
5574 #undef TARGET_CAN_CHANGE_MODE_CLASS
5575 #define TARGET_CAN_CHANGE_MODE_CLASS nds32_can_change_mode_class
5576
5577 \f
5578 /* Obsolete Macros for Defining Constraints. */
5579
5580 \f
5581 /* Stack Layout and Calling Conventions. */
5582
5583 /* -- Basic Stack Layout. */
5584
5585 /* -- Exception Handling Support. */
5586
5587 /* -- Specifying How Stack Checking is Done. */
5588
5589 /* -- Registers That Address the Stack Frame. */
5590
5591 /* -- Eliminating Frame Pointer and Arg Pointer. */
5592
5593 #undef TARGET_CAN_ELIMINATE
5594 #define TARGET_CAN_ELIMINATE nds32_can_eliminate
5595
5596 /* -- Passing Function Arguments on the Stack. */
5597
5598 /* -- Passing Arguments in Registers. */
5599
5600 #undef TARGET_FUNCTION_ARG
5601 #define TARGET_FUNCTION_ARG nds32_function_arg
5602
5603 #undef TARGET_MUST_PASS_IN_STACK
5604 #define TARGET_MUST_PASS_IN_STACK nds32_must_pass_in_stack
5605
5606 #undef TARGET_ARG_PARTIAL_BYTES
5607 #define TARGET_ARG_PARTIAL_BYTES nds32_arg_partial_bytes
5608
5609 #undef TARGET_FUNCTION_ARG_ADVANCE
5610 #define TARGET_FUNCTION_ARG_ADVANCE nds32_function_arg_advance
5611
5612 #undef TARGET_FUNCTION_ARG_BOUNDARY
5613 #define TARGET_FUNCTION_ARG_BOUNDARY nds32_function_arg_boundary
5614
5615 #undef TARGET_VECTOR_MODE_SUPPORTED_P
5616 #define TARGET_VECTOR_MODE_SUPPORTED_P nds32_vector_mode_supported_p
5617
5618 /* -- How Scalar Function Values Are Returned. */
5619
5620 #undef TARGET_FUNCTION_VALUE
5621 #define TARGET_FUNCTION_VALUE nds32_function_value
5622
5623 #undef TARGET_LIBCALL_VALUE
5624 #define TARGET_LIBCALL_VALUE nds32_libcall_value
5625
5626 #undef TARGET_FUNCTION_VALUE_REGNO_P
5627 #define TARGET_FUNCTION_VALUE_REGNO_P nds32_function_value_regno_p
5628
5629 /* -- How Large Values Are Returned. */
5630
5631 #undef TARGET_RETURN_IN_MEMORY
5632 #define TARGET_RETURN_IN_MEMORY nds32_return_in_memory
5633
5634 /* -- Caller-Saves Register Allocation. */
5635
5636 /* -- Function Entry and Exit. */
5637
5638 #undef TARGET_ASM_FUNCTION_PROLOGUE
5639 #define TARGET_ASM_FUNCTION_PROLOGUE nds32_asm_function_prologue
5640
5641 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
5642 #define TARGET_ASM_FUNCTION_END_PROLOGUE nds32_asm_function_end_prologue
5643
5644 #undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
5645 #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE nds32_asm_function_begin_epilogue
5646
5647 #undef TARGET_ASM_FUNCTION_EPILOGUE
5648 #define TARGET_ASM_FUNCTION_EPILOGUE nds32_asm_function_epilogue
5649
5650 #undef TARGET_ASM_OUTPUT_MI_THUNK
5651 #define TARGET_ASM_OUTPUT_MI_THUNK nds32_asm_output_mi_thunk
5652
5653 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
5654 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
5655
5656 /* -- Generating Code for Profiling. */
5657
5658 /* -- Permitting tail calls. */
5659
5660 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
5661 #define TARGET_FUNCTION_OK_FOR_SIBCALL nds32_function_ok_for_sibcall
5662
5663 #undef TARGET_WARN_FUNC_RETURN
5664 #define TARGET_WARN_FUNC_RETURN nds32_warn_func_return
5665
5666 /* Stack smashing protection. */
5667
5668 \f
5669 /* Implementing the Varargs Macros. */
5670
5671 #undef TARGET_SETUP_INCOMING_VARARGS
5672 #define TARGET_SETUP_INCOMING_VARARGS nds32_setup_incoming_varargs
5673
5674 #undef TARGET_STRICT_ARGUMENT_NAMING
5675 #define TARGET_STRICT_ARGUMENT_NAMING nds32_strict_argument_naming
5676
5677 \f
5678 /* Trampolines for Nested Functions. */
5679
5680 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
5681 #define TARGET_ASM_TRAMPOLINE_TEMPLATE nds32_asm_trampoline_template
5682
5683 #undef TARGET_TRAMPOLINE_INIT
5684 #define TARGET_TRAMPOLINE_INIT nds32_trampoline_init
5685
5686 \f
5687 /* Implicit Calls to Library Routines. */
5688
5689 \f
5690 /* Addressing Modes. */
5691
5692 #undef TARGET_LEGITIMATE_ADDRESS_P
5693 #define TARGET_LEGITIMATE_ADDRESS_P nds32_legitimate_address_p
5694
5695 #undef TARGET_LEGITIMIZE_ADDRESS
5696 #define TARGET_LEGITIMIZE_ADDRESS nds32_legitimize_address
5697
5698 #undef TARGET_LEGITIMATE_CONSTANT_P
5699 #define TARGET_LEGITIMATE_CONSTANT_P nds32_legitimate_constant_p
5700
5701 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
5702 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE nds32_vectorize_preferred_simd_mode
5703
5704 #undef TARGET_CANNOT_FORCE_CONST_MEM
5705 #define TARGET_CANNOT_FORCE_CONST_MEM nds32_cannot_force_const_mem
5706
5707 #undef TARGET_DELEGITIMIZE_ADDRESS
5708 #define TARGET_DELEGITIMIZE_ADDRESS nds32_delegitimize_address
5709
5710 \f
5711 /* Anchored Addresses. */
5712
5713 \f
5714 /* Condition Code Status. */
5715
5716 /* -- Representation of condition codes using (cc0). */
5717
5718 /* -- Representation of condition codes using registers. */
5719
5720 #undef TARGET_CANONICALIZE_COMPARISON
5721 #define TARGET_CANONICALIZE_COMPARISON nds32_canonicalize_comparison
5722
5723 /* -- Macros to control conditional execution. */
5724
5725 \f
5726 /* Describing Relative Costs of Operations. */
5727
5728 #undef TARGET_REGISTER_MOVE_COST
5729 #define TARGET_REGISTER_MOVE_COST nds32_register_move_cost
5730
5731 #undef TARGET_MEMORY_MOVE_COST
5732 #define TARGET_MEMORY_MOVE_COST nds32_memory_move_cost
5733
5734 #undef TARGET_RTX_COSTS
5735 #define TARGET_RTX_COSTS nds32_rtx_costs
5736
5737 #undef TARGET_ADDRESS_COST
5738 #define TARGET_ADDRESS_COST nds32_address_cost
5739
5740 \f
5741 /* Adjusting the Instruction Scheduler. */
5742
5743 \f
5744 /* Dividing the Output into Sections (Texts, Data, . . . ). */
5745
5746 #undef TARGET_ENCODE_SECTION_INFO
5747 #define TARGET_ENCODE_SECTION_INFO nds32_encode_section_info
5748
5749 \f
5750 /* Position Independent Code. */
5751
5752 \f
5753 /* Defining the Output Assembler Language. */
5754
5755 /* -- The Overall Framework of an Assembler File. */
5756
5757 #undef TARGET_ASM_FILE_START
5758 #define TARGET_ASM_FILE_START nds32_asm_file_start
5759 #undef TARGET_ASM_FILE_END
5760 #define TARGET_ASM_FILE_END nds32_asm_file_end
5761
5762 /* -- Output of Data. */
5763
5764 #undef TARGET_ASM_ALIGNED_HI_OP
5765 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
5766
5767 #undef TARGET_ASM_ALIGNED_SI_OP
5768 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
5769
5770 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
5771 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA nds32_asm_output_addr_const_extra
5772
5773 /* -- Output of Uninitialized Variables. */
5774
5775 /* -- Output and Generation of Labels. */
5776
5777 #undef TARGET_ASM_GLOBALIZE_LABEL
5778 #define TARGET_ASM_GLOBALIZE_LABEL nds32_asm_globalize_label
5779
5780 /* -- How Initialization Functions Are Handled. */
5781
5782 /* -- Macros Controlling Initialization Routines. */
5783
5784 /* -- Output of Assembler Instructions. */
5785
5786 #undef TARGET_PRINT_OPERAND
5787 #define TARGET_PRINT_OPERAND nds32_print_operand
5788 #undef TARGET_PRINT_OPERAND_ADDRESS
5789 #define TARGET_PRINT_OPERAND_ADDRESS nds32_print_operand_address
5790
5791 /* -- Output of Dispatch Tables. */
5792
5793 /* -- Assembler Commands for Exception Regions. */
5794
5795 #undef TARGET_DWARF_REGISTER_SPAN
5796 #define TARGET_DWARF_REGISTER_SPAN nds32_dwarf_register_span
5797
5798 /* -- Assembler Commands for Alignment. */
5799
5800 \f
5801 /* Controlling Debugging Information Format. */
5802
5803 /* -- Macros Affecting All Debugging Formats. */
5804
5805 /* -- Specific Options for DBX Output. */
5806
5807 /* -- Open-Ended Hooks for DBX Format. */
5808
5809 /* -- File Names in DBX Format. */
5810
5811 /* -- Macros for DWARF Output. */
5812
5813 /* -- Macros for VMS Debug Format. */
5814
5815 \f
5816 /* Cross Compilation and Floating Point. */
5817
5818 \f
5819 /* Mode Switching Instructions. */
5820
5821 \f
5822 /* Defining target-specific uses of __attribute__. */
5823
5824 #undef TARGET_ATTRIBUTE_TABLE
5825 #define TARGET_ATTRIBUTE_TABLE nds32_attribute_table
5826
5827 #undef TARGET_MERGE_DECL_ATTRIBUTES
5828 #define TARGET_MERGE_DECL_ATTRIBUTES nds32_merge_decl_attributes
5829
5830 #undef TARGET_INSERT_ATTRIBUTES
5831 #define TARGET_INSERT_ATTRIBUTES nds32_insert_attributes
5832
5833 #undef TARGET_OPTION_PRAGMA_PARSE
5834 #define TARGET_OPTION_PRAGMA_PARSE nds32_option_pragma_parse
5835
5836 #undef TARGET_OPTION_OVERRIDE
5837 #define TARGET_OPTION_OVERRIDE nds32_option_override
5838
5839 \f
5840 /* Emulating TLS. */
5841
5842 #undef TARGET_HAVE_TLS
5843 #define TARGET_HAVE_TLS TARGET_LINUX_ABI
5844
5845 \f
5846 /* Defining coprocessor specifics for MIPS targets. */
5847
5848 \f
5849 /* Parameters for Precompiled Header Validity Checking. */
5850
5851 \f
5852 /* C++ ABI parameters. */
5853
5854 \f
5855 /* Adding support for named address spaces. */
5856
5857 \f
5858 /* Miscellaneous Parameters. */
5859
5860 #undef TARGET_MD_ASM_ADJUST
5861 #define TARGET_MD_ASM_ADJUST nds32_md_asm_adjust
5862
5863 #undef TARGET_INIT_BUILTINS
5864 #define TARGET_INIT_BUILTINS nds32_init_builtins
5865
5866 #undef TARGET_BUILTIN_DECL
5867 #define TARGET_BUILTIN_DECL nds32_builtin_decl
5868
5869 #undef TARGET_EXPAND_BUILTIN
5870 #define TARGET_EXPAND_BUILTIN nds32_expand_builtin
5871
5872 #undef TARGET_INIT_LIBFUNCS
5873 #define TARGET_INIT_LIBFUNCS nds32_init_libfuncs
5874
5875 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
5876 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P nds32_use_blocks_for_constant_p
5877
5878 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
5879 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
5880
5881 \f
5882 /* ------------------------------------------------------------------------ */
5883
/* Initialize the GCC target structure.  TARGET_INITIALIZER aggregates
   all the TARGET_* hook definitions above into the targetm vector.  */

struct gcc_target targetm = TARGET_INITIALIZER;
5887
5888 /* ------------------------------------------------------------------------ */