gcc/config/aarch64/aarch64.c
1 /* Machine description for AArch64 architecture.
2 Copyright (C) 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "insn-attr.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "df.h"
31 #include "hard-reg-set.h"
32 #include "output.h"
33 #include "expr.h"
34 #include "reload.h"
35 #include "toplev.h"
36 #include "target.h"
37 #include "target-def.h"
38 #include "targhooks.h"
39 #include "ggc.h"
40 #include "function.h"
41 #include "tm_p.h"
42 #include "recog.h"
43 #include "langhooks.h"
44 #include "diagnostic-core.h"
45 #include "gimple.h"
46 #include "optabs.h"
47 #include "dwarf2.h"
48 #include "cfgloop.h"
49 #include "tree-vectorizer.h"
50
51 /* Defined for convenience. */
52 #define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)
53
54 /* Classifies an address.
55
56 ADDRESS_REG_IMM
57 A simple base register plus immediate offset.
58
59 ADDRESS_REG_WB
60 A base register indexed by immediate offset with writeback.
61
62 ADDRESS_REG_REG
63 A base register indexed by (optionally scaled) register.
64
65 ADDRESS_REG_UXTW
66 A base register indexed by (optionally scaled) zero-extended register.
67
68 ADDRESS_REG_SXTW
69 A base register indexed by (optionally scaled) sign-extended register.
70
71 ADDRESS_LO_SUM
72 A LO_SUM rtx with a base register and "LO12" symbol relocation.
73
74 ADDRESS_SYMBOLIC
75 A constant symbolic address, in pc-relative literal pool. */
76
77 enum aarch64_address_type {
78 ADDRESS_REG_IMM,
79 ADDRESS_REG_WB,
80 ADDRESS_REG_REG,
81 ADDRESS_REG_UXTW,
82 ADDRESS_REG_SXTW,
83 ADDRESS_LO_SUM,
84 ADDRESS_SYMBOLIC
85 };
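/* For illustration (examples added for exposition, not from the original
   source): typical assembler forms for each classification, assuming x1 is
   the base register, x2/w2 the index register and foo a symbol:

     ADDRESS_REG_IMM     ldr x0, [x1, #16]
     ADDRESS_REG_WB      ldr x0, [x1, #16]!   or   ldr x0, [x1], #16
     ADDRESS_REG_REG     ldr x0, [x1, x2]     or   ldr x0, [x1, x2, lsl #3]
     ADDRESS_REG_UXTW    ldr w0, [x1, w2, uxtw #2]
     ADDRESS_REG_SXTW    ldr w0, [x1, w2, sxtw #2]
     ADDRESS_LO_SUM      add/ldr using #:lo12:foo, paired with adrp x1, foo
     ADDRESS_SYMBOLIC    ldr x0, <literal-pool label>  (pc-relative load)  */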
86
87 struct aarch64_address_info {
88 enum aarch64_address_type type;
89 rtx base;
90 rtx offset;
91 int shift;
92 enum aarch64_symbol_type symbol_type;
93 };
94
95 struct simd_immediate_info
96 {
97 rtx value;
98 int shift;
99 int element_width;
100 bool mvn;
101 bool msl;
102 };
103
104 /* The current code model. */
105 enum aarch64_code_model aarch64_cmodel;
106
107 #ifdef HAVE_AS_TLS
108 #undef TARGET_HAVE_TLS
109 #define TARGET_HAVE_TLS 1
110 #endif
111
112 static bool aarch64_lra_p (void);
113 static bool aarch64_composite_type_p (const_tree, enum machine_mode);
114 static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
115 const_tree,
116 enum machine_mode *, int *,
117 bool *);
118 static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
119 static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
120 static void aarch64_override_options_after_change (void);
121 static bool aarch64_vector_mode_supported_p (enum machine_mode);
122 static unsigned bit_count (unsigned HOST_WIDE_INT);
123 static bool aarch64_const_vec_all_same_int_p (rtx,
124 HOST_WIDE_INT, HOST_WIDE_INT);
125
126 static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
127 const unsigned char *sel);
128
129 /* The processor for which instructions should be scheduled. */
130 enum aarch64_processor aarch64_tune = generic;
131
132 /* The current tuning set. */
133 const struct tune_params *aarch64_tune_params;
134
135 /* Mask to specify which instructions we are allowed to generate. */
136 unsigned long aarch64_isa_flags = 0;
137
138 /* Mask to specify which instruction scheduling options should be used. */
139 unsigned long aarch64_tune_flags = 0;
140
141 /* Tuning parameters. */
142
143 #if HAVE_DESIGNATED_INITIALIZERS
144 #define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
145 #else
146 #define NAMED_PARAM(NAME, VAL) (VAL)
147 #endif
148
149 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
150 __extension__
151 #endif
152 static const struct cpu_rtx_cost_table generic_rtx_cost_table =
153 {
154 NAMED_PARAM (memory_load, COSTS_N_INSNS (1)),
155 NAMED_PARAM (memory_store, COSTS_N_INSNS (0)),
156 NAMED_PARAM (register_shift, COSTS_N_INSNS (1)),
157 NAMED_PARAM (int_divide, COSTS_N_INSNS (6)),
158 NAMED_PARAM (float_divide, COSTS_N_INSNS (2)),
159 NAMED_PARAM (double_divide, COSTS_N_INSNS (6)),
160 NAMED_PARAM (int_multiply, COSTS_N_INSNS (1)),
161 NAMED_PARAM (int_multiply_extend, COSTS_N_INSNS (1)),
162 NAMED_PARAM (int_multiply_add, COSTS_N_INSNS (1)),
163 NAMED_PARAM (int_multiply_extend_add, COSTS_N_INSNS (1)),
164 NAMED_PARAM (float_multiply, COSTS_N_INSNS (0)),
165 NAMED_PARAM (double_multiply, COSTS_N_INSNS (1))
166 };
167
168 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
169 __extension__
170 #endif
171 static const struct cpu_addrcost_table generic_addrcost_table =
172 {
173 NAMED_PARAM (pre_modify, 0),
174 NAMED_PARAM (post_modify, 0),
175 NAMED_PARAM (register_offset, 0),
176 NAMED_PARAM (register_extend, 0),
177 NAMED_PARAM (imm_offset, 0)
178 };
179
180 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
181 __extension__
182 #endif
183 static const struct cpu_regmove_cost generic_regmove_cost =
184 {
185 NAMED_PARAM (GP2GP, 1),
186 NAMED_PARAM (GP2FP, 2),
187 NAMED_PARAM (FP2GP, 2),
188 /* We currently do not provide direct support for TFmode Q->Q move.
189 Therefore we need to raise the cost above 2 in order to have
190 reload handle the situation. */
191 NAMED_PARAM (FP2FP, 4)
192 };
193
194 /* Generic costs for vector insn classes. */
195 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
196 __extension__
197 #endif
198 static const struct cpu_vector_cost generic_vector_cost =
199 {
200 NAMED_PARAM (scalar_stmt_cost, 1),
201 NAMED_PARAM (scalar_load_cost, 1),
202 NAMED_PARAM (scalar_store_cost, 1),
203 NAMED_PARAM (vec_stmt_cost, 1),
204 NAMED_PARAM (vec_to_scalar_cost, 1),
205 NAMED_PARAM (scalar_to_vec_cost, 1),
206 NAMED_PARAM (vec_align_load_cost, 1),
207 NAMED_PARAM (vec_unalign_load_cost, 1),
208 NAMED_PARAM (vec_unalign_store_cost, 1),
209 NAMED_PARAM (vec_store_cost, 1),
210 NAMED_PARAM (cond_taken_branch_cost, 3),
211 NAMED_PARAM (cond_not_taken_branch_cost, 1)
212 };
213
214 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
215 __extension__
216 #endif
217 static const struct tune_params generic_tunings =
218 {
219 &generic_rtx_cost_table,
220 &generic_addrcost_table,
221 &generic_regmove_cost,
222 &generic_vector_cost,
223 NAMED_PARAM (memmov_cost, 4)
224 };
225
226 /* A processor implementing AArch64. */
227 struct processor
228 {
229 const char *const name;
230 enum aarch64_processor core;
231 const char *arch;
232 const unsigned long flags;
233 const struct tune_params *const tune;
234 };
235
236 /* Processor cores implementing AArch64. */
237 static const struct processor all_cores[] =
238 {
239 #define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
240 {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
241 #include "aarch64-cores.def"
242 #undef AARCH64_CORE
243 {"generic", generic, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
244 {NULL, aarch64_none, NULL, 0, NULL}
245 };
246
247 /* Architectures implementing AArch64. */
248 static const struct processor all_architectures[] =
249 {
250 #define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
251 {NAME, CORE, #ARCH, FLAGS, NULL},
252 #include "aarch64-arches.def"
253 #undef AARCH64_ARCH
254 {"generic", generic, "8", AARCH64_FL_FOR_ARCH8, NULL},
255 {NULL, aarch64_none, NULL, 0, NULL}
256 };
257
258 /* Target specification. These are populated as commandline arguments
259 are processed, or NULL if not specified. */
260 static const struct processor *selected_arch;
261 static const struct processor *selected_cpu;
262 static const struct processor *selected_tune;
263
264 #define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
265
266 /* An ISA extension in the co-processor and main instruction set space. */
267 struct aarch64_option_extension
268 {
269 const char *const name;
270 const unsigned long flags_on;
271 const unsigned long flags_off;
272 };
273
274 /* ISA extensions in AArch64. */
275 static const struct aarch64_option_extension all_extensions[] =
276 {
277 #define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
278 {NAME, FLAGS_ON, FLAGS_OFF},
279 #include "aarch64-option-extensions.def"
280 #undef AARCH64_OPT_EXTENSION
281 {NULL, 0, 0}
282 };
283
284 /* Used to track the size of an address when generating a pre/post
285 increment address. */
286 static enum machine_mode aarch64_memory_reference_mode;
287
288 /* Used to force GTY into this file. */
289 static GTY(()) int gty_dummy;
290
291 /* A table of valid AArch64 "bitmask immediate" values for
292 logical instructions. */
293
294 #define AARCH64_NUM_BITMASKS 5334
295 static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
296
297 /* Did we set flag_omit_frame_pointer just so
298 aarch64_frame_pointer_required would be called? */
299 static bool faked_omit_frame_pointer;
300
301 typedef enum aarch64_cond_code
302 {
303 AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
304 AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
305 AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
306 }
307 aarch64_cc;
308
309 #define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
310
311 /* The condition codes of the processor, and the inverse function. */
312 static const char * const aarch64_condition_codes[] =
313 {
314 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
315 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
316 };
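/* Note added for exposition (not from the original source): the table above
   lists the codes in inverse pairs (eq/ne, cs/cc, mi/pl, vs/vc, hi/ls, ge/lt,
   gt/le, al/nv), which is why AARCH64_INVERSE_CONDITION_CODE only needs to
   flip the low bit, e.g. AARCH64_EQ (0) ^ 1 == AARCH64_NE (1).  */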
317
318 /* Provide a mapping from gcc register numbers to dwarf register numbers. */
319 unsigned
320 aarch64_dbx_register_number (unsigned regno)
321 {
322 if (GP_REGNUM_P (regno))
323 return AARCH64_DWARF_R0 + regno - R0_REGNUM;
324 else if (regno == SP_REGNUM)
325 return AARCH64_DWARF_SP;
326 else if (FP_REGNUM_P (regno))
327 return AARCH64_DWARF_V0 + regno - V0_REGNUM;
328
329 /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
330 equivalent DWARF register. */
331 return DWARF_FRAME_REGISTERS;
332 }
333
334 /* Return TRUE if MODE is any of the large INT modes. */
335 static bool
336 aarch64_vect_struct_mode_p (enum machine_mode mode)
337 {
338 return mode == OImode || mode == CImode || mode == XImode;
339 }
340
341 /* Return TRUE if MODE is any of the vector modes. */
342 static bool
343 aarch64_vector_mode_p (enum machine_mode mode)
344 {
345 return aarch64_vector_mode_supported_p (mode)
346 || aarch64_vect_struct_mode_p (mode);
347 }
348
349 /* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */
350 static bool
351 aarch64_array_mode_supported_p (enum machine_mode mode,
352 unsigned HOST_WIDE_INT nelems)
353 {
354 if (TARGET_SIMD
355 && AARCH64_VALID_SIMD_QREG_MODE (mode)
356 && (nelems >= 2 && nelems <= 4))
357 return true;
358
359 return false;
360 }
361
362 /* Implement HARD_REGNO_NREGS. */
363
364 int
365 aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
366 {
367 switch (aarch64_regno_regclass (regno))
368 {
369 case FP_REGS:
370 case FP_LO_REGS:
371 return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
372 default:
373 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
374 }
375 gcc_unreachable ();
376 }
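/* Worked example added for exposition (assuming UNITS_PER_WORD == 8 and
   UNITS_PER_VREG == 16): a 16-byte TFmode value needs two registers in the
   general-register classes ((16 + 8 - 1) / 8 == 2) but only one register in
   FP_REGS or FP_LO_REGS ((16 + 16 - 1) / 16 == 1).  */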
377
378 /* Implement HARD_REGNO_MODE_OK. */
379
380 int
381 aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
382 {
383 if (GET_MODE_CLASS (mode) == MODE_CC)
384 return regno == CC_REGNUM;
385
386 if (regno == SP_REGNUM)
387 /* The purpose of comparing with ptr_mode is to support the
388 global register variable associated with the stack pointer
389 register via the syntax of asm ("wsp") in ILP32. */
390 return mode == Pmode || mode == ptr_mode;
391
392 if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)
393 return mode == Pmode;
394
395 if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
396 return 1;
397
398 if (FP_REGNUM_P (regno))
399 {
400 if (aarch64_vect_struct_mode_p (mode))
401 return
402 (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
403 else
404 return 1;
405 }
406
407 return 0;
408 }
409
410 /* Return true if calls to DECL should be treated as
411 long-calls (i.e. called via a register). */
412 static bool
413 aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
414 {
415 return false;
416 }
417
418 /* Return true if calls to symbol-ref SYM should be treated as
419 long-calls (i.e. called via a register). */
420 bool
421 aarch64_is_long_call_p (rtx sym)
422 {
423 return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
424 }
425
426 /* Return true if the offsets to a zero/sign-extract operation
427 represent an expression that matches an extend operation. The
428 operands represent the parameters from
429
430 (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */
431 bool
432 aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
433 rtx extract_imm)
434 {
435 HOST_WIDE_INT mult_val, extract_val;
436
437 if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
438 return false;
439
440 mult_val = INTVAL (mult_imm);
441 extract_val = INTVAL (extract_imm);
442
443 if (extract_val > 8
444 && extract_val < GET_MODE_BITSIZE (mode)
445 && exact_log2 (extract_val & ~7) > 0
446 && (extract_val & 7) <= 4
447 && mult_val == (1 << (extract_val & 7)))
448 return true;
449
450 return false;
451 }
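/* Worked example added for exposition: in DImode, mult_imm == 8 and
   extract_imm == 35 pass the test above, since 35 & ~7 == 32 is a power of
   two, 35 & 7 == 3, and 8 == 1 << 3.  Extracting the low 35 bits of
   (reg * 8) is the same as extending the low 32 bits of reg and shifting
   the result left by 3, i.e. an extend operation.  */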
452
453 /* Emit an insn that's a simple single-set. Both the operands must be
454 known to be valid. */
455 inline static rtx
456 emit_set_insn (rtx x, rtx y)
457 {
458 return emit_insn (gen_rtx_SET (VOIDmode, x, y));
459 }
460
461 /* X and Y are two things to compare using CODE. Emit the compare insn and
462 return the rtx for the CC register in the proper mode. */
463 rtx
464 aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
465 {
466 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
467 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
468
469 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
470 return cc_reg;
471 }
472
473 /* Build the SYMBOL_REF for __tls_get_addr. */
474
475 static GTY(()) rtx tls_get_addr_libfunc;
476
477 rtx
478 aarch64_tls_get_addr (void)
479 {
480 if (!tls_get_addr_libfunc)
481 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
482 return tls_get_addr_libfunc;
483 }
484
485 /* Return the TLS model to use for ADDR. */
486
487 static enum tls_model
488 tls_symbolic_operand_type (rtx addr)
489 {
490 enum tls_model tls_kind = TLS_MODEL_NONE;
491 rtx sym, addend;
492
493 if (GET_CODE (addr) == CONST)
494 {
495 split_const (addr, &sym, &addend);
496 if (GET_CODE (sym) == SYMBOL_REF)
497 tls_kind = SYMBOL_REF_TLS_MODEL (sym);
498 }
499 else if (GET_CODE (addr) == SYMBOL_REF)
500 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
501
502 return tls_kind;
503 }
504
505 /* We'll allow lo_sums in our legitimate addresses so that
506 combine can take care of combining addresses where
507 necessary, but for generation purposes, we'll generate
508 the address as:
509 RTL Absolute
510 tmp = hi (symbol_ref); adrp x1, foo
511 dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo
512 nop
513
514 PIC TLS
515 adrp x1, :got:foo adrp tmp, :tlsgd:foo
516 ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo
517 bl __tls_get_addr
518 nop
519
520 Load TLS symbol, depending on TLS mechanism and TLS access model.
521
522 Global Dynamic - Traditional TLS:
523 adrp tmp, :tlsgd:imm
524 add dest, tmp, #:tlsgd_lo12:imm
525 bl __tls_get_addr
526
527 Global Dynamic - TLS Descriptors:
528 adrp dest, :tlsdesc:imm
529 ldr tmp, [dest, #:tlsdesc_lo12:imm]
530 add dest, dest, #:tlsdesc_lo12:imm
531 blr tmp
532 mrs tp, tpidr_el0
533 add dest, dest, tp
534
535 Initial Exec:
536 mrs tp, tpidr_el0
537 adrp tmp, :gottprel:imm
538 ldr dest, [tmp, #:gottprel_lo12:imm]
539 add dest, dest, tp
540
541 Local Exec:
542 mrs tp, tpidr_el0
543 add t0, tp, #:tprel_hi12:imm
544 add t0, #:tprel_lo12_nc:imm
545 */
546
547 static void
548 aarch64_load_symref_appropriately (rtx dest, rtx imm,
549 enum aarch64_symbol_type type)
550 {
551 switch (type)
552 {
553 case SYMBOL_SMALL_ABSOLUTE:
554 {
555 /* In ILP32, the mode of dest can be either SImode or DImode. */
556 rtx tmp_reg = dest;
557 enum machine_mode mode = GET_MODE (dest);
558
559 gcc_assert (mode == Pmode || mode == ptr_mode);
560
561 if (can_create_pseudo_p ())
562 tmp_reg = gen_reg_rtx (mode);
563
564 emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
565 emit_insn (gen_add_losym (dest, tmp_reg, imm));
566 return;
567 }
568
569 case SYMBOL_TINY_ABSOLUTE:
570 emit_insn (gen_rtx_SET (Pmode, dest, imm));
571 return;
572
573 case SYMBOL_SMALL_GOT:
574 {
575 /* In ILP32, the mode of dest can be either SImode or DImode,
576 while the got entry is always of SImode size. The mode of
577 dest depends on how dest is used: if dest is assigned to a
578 pointer (e.g. in memory), it has SImode; it may have
579 DImode if dest is dereferenced to access the memory.
580 This is why we have to handle three different ldr_got_small
581 patterns here (two patterns for ILP32). */
582 rtx tmp_reg = dest;
583 enum machine_mode mode = GET_MODE (dest);
584
585 if (can_create_pseudo_p ())
586 tmp_reg = gen_reg_rtx (mode);
587
588 emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
589 if (mode == ptr_mode)
590 {
591 if (mode == DImode)
592 emit_insn (gen_ldr_got_small_di (dest, tmp_reg, imm));
593 else
594 emit_insn (gen_ldr_got_small_si (dest, tmp_reg, imm));
595 }
596 else
597 {
598 gcc_assert (mode == Pmode);
599 emit_insn (gen_ldr_got_small_sidi (dest, tmp_reg, imm));
600 }
601
602 return;
603 }
604
605 case SYMBOL_SMALL_TLSGD:
606 {
607 rtx insns;
608 rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
609
610 start_sequence ();
611 emit_call_insn (gen_tlsgd_small (result, imm));
612 insns = get_insns ();
613 end_sequence ();
614
615 RTL_CONST_CALL_P (insns) = 1;
616 emit_libcall_block (insns, dest, result, imm);
617 return;
618 }
619
620 case SYMBOL_SMALL_TLSDESC:
621 {
622 rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM);
623 rtx tp;
624
625 emit_insn (gen_tlsdesc_small (imm));
626 tp = aarch64_load_tp (NULL);
627 emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0)));
628 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
629 return;
630 }
631
632 case SYMBOL_SMALL_GOTTPREL:
633 {
634 rtx tmp_reg = gen_reg_rtx (Pmode);
635 rtx tp = aarch64_load_tp (NULL);
636 emit_insn (gen_tlsie_small (tmp_reg, imm));
637 emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg)));
638 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
639 return;
640 }
641
642 case SYMBOL_SMALL_TPREL:
643 {
644 rtx tp = aarch64_load_tp (NULL);
645 emit_insn (gen_tlsle_small (dest, tp, imm));
646 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
647 return;
648 }
649
650 case SYMBOL_TINY_GOT:
651 emit_insn (gen_ldr_got_tiny (dest, imm));
652 return;
653
654 default:
655 gcc_unreachable ();
656 }
657 }
658
659 /* Emit a move from SRC to DEST. Assume that the move expanders can
660 handle all moves if !can_create_pseudo_p (). The distinction is
661 important because, unlike emit_move_insn, the move expanders know
662 how to force Pmode objects into the constant pool even when the
663 constant pool address is not itself legitimate. */
664 static rtx
665 aarch64_emit_move (rtx dest, rtx src)
666 {
667 return (can_create_pseudo_p ()
668 ? emit_move_insn (dest, src)
669 : emit_move_insn_1 (dest, src));
670 }
671
672 void
673 aarch64_split_128bit_move (rtx dst, rtx src)
674 {
675 rtx low_dst;
676
677 enum machine_mode src_mode = GET_MODE (src);
678 enum machine_mode dst_mode = GET_MODE (dst);
679 int src_regno = REGNO (src);
680 int dst_regno = REGNO (dst);
681
682 gcc_assert (dst_mode == TImode || dst_mode == TFmode);
683
684 if (REG_P (dst) && REG_P (src))
685 {
686 gcc_assert (src_mode == TImode || src_mode == TFmode);
687
688 /* Handle r -> w, w -> r. */
689 if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
690 {
691 switch (src_mode) {
692 case TImode:
693 emit_insn
694 (gen_aarch64_movtilow_di (dst, gen_lowpart (word_mode, src)));
695 emit_insn
696 (gen_aarch64_movtihigh_di (dst, gen_highpart (word_mode, src)));
697 return;
698 case TFmode:
699 emit_insn
700 (gen_aarch64_movtflow_di (dst, gen_lowpart (word_mode, src)));
701 emit_insn
702 (gen_aarch64_movtfhigh_di (dst, gen_highpart (word_mode, src)));
703 return;
704 default:
705 gcc_unreachable ();
706 }
707 }
708 else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
709 {
710 switch (src_mode) {
711 case TImode:
712 emit_insn
713 (gen_aarch64_movdi_tilow (gen_lowpart (word_mode, dst), src));
714 emit_insn
715 (gen_aarch64_movdi_tihigh (gen_highpart (word_mode, dst), src));
716 return;
717 case TFmode:
718 emit_insn
719 (gen_aarch64_movdi_tflow (gen_lowpart (word_mode, dst), src));
720 emit_insn
721 (gen_aarch64_movdi_tfhigh (gen_highpart (word_mode, dst), src));
722 return;
723 default:
724 gcc_unreachable ();
725 }
726 }
727 /* Fall through to r -> r cases. */
728 }
729
730 switch (dst_mode) {
731 case TImode:
732 low_dst = gen_lowpart (word_mode, dst);
733 if (REG_P (low_dst)
734 && reg_overlap_mentioned_p (low_dst, src))
735 {
736 aarch64_emit_move (gen_highpart (word_mode, dst),
737 gen_highpart_mode (word_mode, TImode, src));
738 aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
739 }
740 else
741 {
742 aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
743 aarch64_emit_move (gen_highpart (word_mode, dst),
744 gen_highpart_mode (word_mode, TImode, src));
745 }
746 return;
747 case TFmode:
748 emit_move_insn (gen_rtx_REG (DFmode, dst_regno),
749 gen_rtx_REG (DFmode, src_regno));
750 emit_move_insn (gen_rtx_REG (DFmode, dst_regno + 1),
751 gen_rtx_REG (DFmode, src_regno + 1));
752 return;
753 default:
754 gcc_unreachable ();
755 }
756 }
757
758 bool
759 aarch64_split_128bit_move_p (rtx dst, rtx src)
760 {
761 return (! REG_P (src)
762 || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
763 }
764
765 /* Split a complex SIMD combine. */
766
767 void
768 aarch64_split_simd_combine (rtx dst, rtx src1, rtx src2)
769 {
770 enum machine_mode src_mode = GET_MODE (src1);
771 enum machine_mode dst_mode = GET_MODE (dst);
772
773 gcc_assert (VECTOR_MODE_P (dst_mode));
774
775 if (REG_P (dst) && REG_P (src1) && REG_P (src2))
776 {
777 rtx (*gen) (rtx, rtx, rtx);
778
779 switch (src_mode)
780 {
781 case V8QImode:
782 gen = gen_aarch64_simd_combinev8qi;
783 break;
784 case V4HImode:
785 gen = gen_aarch64_simd_combinev4hi;
786 break;
787 case V2SImode:
788 gen = gen_aarch64_simd_combinev2si;
789 break;
790 case V2SFmode:
791 gen = gen_aarch64_simd_combinev2sf;
792 break;
793 case DImode:
794 gen = gen_aarch64_simd_combinedi;
795 break;
796 case DFmode:
797 gen = gen_aarch64_simd_combinedf;
798 break;
799 default:
800 gcc_unreachable ();
801 }
802
803 emit_insn (gen (dst, src1, src2));
804 return;
805 }
806 }
807
808 /* Split a complex SIMD move. */
809
810 void
811 aarch64_split_simd_move (rtx dst, rtx src)
812 {
813 enum machine_mode src_mode = GET_MODE (src);
814 enum machine_mode dst_mode = GET_MODE (dst);
815
816 gcc_assert (VECTOR_MODE_P (dst_mode));
817
818 if (REG_P (dst) && REG_P (src))
819 {
820 rtx (*gen) (rtx, rtx);
821
822 gcc_assert (VECTOR_MODE_P (src_mode));
823
824 switch (src_mode)
825 {
826 case V16QImode:
827 gen = gen_aarch64_split_simd_movv16qi;
828 break;
829 case V8HImode:
830 gen = gen_aarch64_split_simd_movv8hi;
831 break;
832 case V4SImode:
833 gen = gen_aarch64_split_simd_movv4si;
834 break;
835 case V2DImode:
836 gen = gen_aarch64_split_simd_movv2di;
837 break;
838 case V4SFmode:
839 gen = gen_aarch64_split_simd_movv4sf;
840 break;
841 case V2DFmode:
842 gen = gen_aarch64_split_simd_movv2df;
843 break;
844 default:
845 gcc_unreachable ();
846 }
847
848 emit_insn (gen (dst, src));
849 return;
850 }
851 }
852
853 static rtx
854 aarch64_force_temporary (enum machine_mode mode, rtx x, rtx value)
855 {
856 if (can_create_pseudo_p ())
857 return force_reg (mode, value);
858 else
859 {
860 x = aarch64_emit_move (x, value);
861 return x;
862 }
863 }
864
865
866 static rtx
867 aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
868 {
869 if (!aarch64_plus_immediate (GEN_INT (offset), mode))
870 {
871 rtx high;
872 /* Load the full offset into a register. This
873 might be improvable in the future. */
874 high = GEN_INT (offset);
875 offset = 0;
876 high = aarch64_force_temporary (mode, temp, high);
877 reg = aarch64_force_temporary (mode, temp,
878 gen_rtx_PLUS (mode, high, reg));
879 }
880 return plus_constant (mode, reg, offset);
881 }
882
883 void
884 aarch64_expand_mov_immediate (rtx dest, rtx imm)
885 {
886 enum machine_mode mode = GET_MODE (dest);
887 unsigned HOST_WIDE_INT mask;
888 int i;
889 bool first;
890 unsigned HOST_WIDE_INT val;
891 bool subtargets;
892 rtx subtarget;
893 int one_match, zero_match;
894
895 gcc_assert (mode == SImode || mode == DImode);
896
897 /* Check on what type of symbol it is. */
898 if (GET_CODE (imm) == SYMBOL_REF
899 || GET_CODE (imm) == LABEL_REF
900 || GET_CODE (imm) == CONST)
901 {
902 rtx mem, base, offset;
903 enum aarch64_symbol_type sty;
904
905 /* If we have (const (plus symbol offset)), separate out the offset
906 before we start classifying the symbol. */
907 split_const (imm, &base, &offset);
908
909 sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
910 switch (sty)
911 {
912 case SYMBOL_FORCE_TO_MEM:
913 if (offset != const0_rtx
914 && targetm.cannot_force_const_mem (mode, imm))
915 {
916 gcc_assert(can_create_pseudo_p ());
917 base = aarch64_force_temporary (mode, dest, base);
918 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
919 aarch64_emit_move (dest, base);
920 return;
921 }
922 mem = force_const_mem (ptr_mode, imm);
923 gcc_assert (mem);
924 if (mode != ptr_mode)
925 mem = gen_rtx_ZERO_EXTEND (mode, mem);
926 emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
927 return;
928
929 case SYMBOL_SMALL_TLSGD:
930 case SYMBOL_SMALL_TLSDESC:
931 case SYMBOL_SMALL_GOTTPREL:
932 case SYMBOL_SMALL_GOT:
933 case SYMBOL_TINY_GOT:
934 if (offset != const0_rtx)
935 {
936 gcc_assert(can_create_pseudo_p ());
937 base = aarch64_force_temporary (mode, dest, base);
938 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
939 aarch64_emit_move (dest, base);
940 return;
941 }
942 /* FALLTHRU */
943
944 case SYMBOL_SMALL_TPREL:
945 case SYMBOL_SMALL_ABSOLUTE:
946 case SYMBOL_TINY_ABSOLUTE:
947 aarch64_load_symref_appropriately (dest, imm, sty);
948 return;
949
950 default:
951 gcc_unreachable ();
952 }
953 }
954
955 if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
956 {
957 emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
958 return;
959 }
960
961 if (!CONST_INT_P (imm))
962 {
963 if (GET_CODE (imm) == HIGH)
964 emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
965 else
966 {
967 rtx mem = force_const_mem (mode, imm);
968 gcc_assert (mem);
969 emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
970 }
971
972 return;
973 }
974
975 if (mode == SImode)
976 {
977 /* We know we can't do this in 1 insn, and we must be able to do it
978 in two; so don't mess around looking for sequences that don't buy
979 us anything. */
980 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
981 emit_insn (gen_insv_immsi (dest, GEN_INT (16),
982 GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
983 return;
984 }
985
986 /* Remaining cases are all for DImode. */
987
988 val = INTVAL (imm);
989 subtargets = optimize && can_create_pseudo_p ();
990
991 one_match = 0;
992 zero_match = 0;
993 mask = 0xffff;
994
995 for (i = 0; i < 64; i += 16, mask <<= 16)
996 {
997 if ((val & mask) == 0)
998 zero_match++;
999 else if ((val & mask) == mask)
1000 one_match++;
1001 }
1002
1003 if (one_match == 2)
1004 {
1005 mask = 0xffff;
1006 for (i = 0; i < 64; i += 16, mask <<= 16)
1007 {
1008 if ((val & mask) != mask)
1009 {
1010 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
1011 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
1012 GEN_INT ((val >> i) & 0xffff)));
1013 return;
1014 }
1015 }
1016 gcc_unreachable ();
1017 }
1018
1019 if (zero_match == 2)
1020 goto simple_sequence;
1021
1022 mask = 0x0ffff0000UL;
1023 for (i = 16; i < 64; i += 16, mask <<= 16)
1024 {
1025 HOST_WIDE_INT comp = mask & ~(mask - 1);
1026
1027 if (aarch64_uimm12_shift (val - (val & mask)))
1028 {
1029 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1030
1031 emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
1032 emit_insn (gen_adddi3 (dest, subtarget,
1033 GEN_INT (val - (val & mask))));
1034 return;
1035 }
1036 else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
1037 {
1038 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1039
1040 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1041 GEN_INT ((val + comp) & mask)));
1042 emit_insn (gen_adddi3 (dest, subtarget,
1043 GEN_INT (val - ((val + comp) & mask))));
1044 return;
1045 }
1046 else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
1047 {
1048 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1049
1050 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1051 GEN_INT ((val - comp) | ~mask)));
1052 emit_insn (gen_adddi3 (dest, subtarget,
1053 GEN_INT (val - ((val - comp) | ~mask))));
1054 return;
1055 }
1056 else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
1057 {
1058 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1059
1060 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1061 GEN_INT (val | ~mask)));
1062 emit_insn (gen_adddi3 (dest, subtarget,
1063 GEN_INT (val - (val | ~mask))));
1064 return;
1065 }
1066 }
1067
1068 /* See if we can do it by arithmetically combining two
1069 immediates. */
1070 for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
1071 {
1072 int j;
1073 mask = 0xffff;
1074
1075 if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
1076 || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
1077 {
1078 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1079 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1080 GEN_INT (aarch64_bitmasks[i])));
1081 emit_insn (gen_adddi3 (dest, subtarget,
1082 GEN_INT (val - aarch64_bitmasks[i])));
1083 return;
1084 }
1085
1086 for (j = 0; j < 64; j += 16, mask <<= 16)
1087 {
1088 if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
1089 {
1090 emit_insn (gen_rtx_SET (VOIDmode, dest,
1091 GEN_INT (aarch64_bitmasks[i])));
1092 emit_insn (gen_insv_immdi (dest, GEN_INT (j),
1093 GEN_INT ((val >> j) & 0xffff)));
1094 return;
1095 }
1096 }
1097 }
1098
1099 /* See if we can do it by logically combining two immediates. */
1100 for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
1101 {
1102 if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
1103 {
1104 int j;
1105
1106 for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
1107 if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
1108 {
1109 subtarget = subtargets ? gen_reg_rtx (mode) : dest;
1110 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1111 GEN_INT (aarch64_bitmasks[i])));
1112 emit_insn (gen_iordi3 (dest, subtarget,
1113 GEN_INT (aarch64_bitmasks[j])));
1114 return;
1115 }
1116 }
1117 else if ((val & aarch64_bitmasks[i]) == val)
1118 {
1119 int j;
1120
1121 for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
1122 if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
1123 {
1124
1125 subtarget = subtargets ? gen_reg_rtx (mode) : dest;
1126 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1127 GEN_INT (aarch64_bitmasks[j])));
1128 emit_insn (gen_anddi3 (dest, subtarget,
1129 GEN_INT (aarch64_bitmasks[i])));
1130 return;
1131 }
1132 }
1133 }
1134
1135 simple_sequence:
1136 first = true;
1137 mask = 0xffff;
1138 for (i = 0; i < 64; i += 16, mask <<= 16)
1139 {
1140 if ((val & mask) != 0)
1141 {
1142 if (first)
1143 {
1144 emit_insn (gen_rtx_SET (VOIDmode, dest,
1145 GEN_INT (val & mask)));
1146 first = false;
1147 }
1148 else
1149 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
1150 GEN_INT ((val >> i) & 0xffff)));
1151 }
1152 }
1153 }
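/* Worked example added for exposition: for the DImode constant
   0x0000123400005678, two of the four 16-bit fields are zero, so the code
   above reaches simple_sequence and emits

       mov  dest, #0x5678
       movk dest, #0x1234, lsl #32

   i.e. one plain SET for the first non-zero field followed by one
   insv_immdi (movk) for the other.  */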
1154
1155 static bool
1156 aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1157 {
1158 /* Indirect calls are not currently supported. */
1159 if (decl == NULL)
1160 return false;
1161
1162 /* Cannot tail-call to long-calls, since these are outside of the
1163 range of a branch instruction (we could handle this if we added
1164 support for indirect tail-calls). */
1165 if (aarch64_decl_is_long_call_p (decl))
1166 return false;
1167
1168 return true;
1169 }
1170
1171 /* Implement TARGET_PASS_BY_REFERENCE. */
1172
1173 static bool
1174 aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
1175 enum machine_mode mode,
1176 const_tree type,
1177 bool named ATTRIBUTE_UNUSED)
1178 {
1179 HOST_WIDE_INT size;
1180 enum machine_mode dummymode;
1181 int nregs;
1182
1183 /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
1184 size = (mode == BLKmode && type)
1185 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
1186
1187 if (type)
1188 {
1189 /* Arrays always passed by reference. */
1190 if (TREE_CODE (type) == ARRAY_TYPE)
1191 return true;
1192 /* Other aggregates based on their size. */
1193 if (AGGREGATE_TYPE_P (type))
1194 size = int_size_in_bytes (type);
1195 }
1196
1197 /* Variable-sized arguments are always passed by reference. */
1198 if (size < 0)
1199 return true;
1200
1201 /* Can this be a candidate to be passed in fp/simd register(s)? */
1202 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
1203 &dummymode, &nregs,
1204 NULL))
1205 return false;
1206
1207 /* Arguments which are variable sized or larger than 2 registers are
1208 passed by reference unless they are a homogeneous floating-point
1209 aggregate. */
1210 return size > 2 * UNITS_PER_WORD;
1211 }
1212
1213 /* Return TRUE if VALTYPE is padded to its least significant bits. */
1214 static bool
1215 aarch64_return_in_msb (const_tree valtype)
1216 {
1217 enum machine_mode dummy_mode;
1218 int dummy_int;
1219
1220 /* Never happens in little-endian mode. */
1221 if (!BYTES_BIG_ENDIAN)
1222 return false;
1223
1224 /* Only composite types smaller than or equal to 16 bytes can
1225 be potentially returned in registers. */
1226 if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
1227 || int_size_in_bytes (valtype) <= 0
1228 || int_size_in_bytes (valtype) > 16)
1229 return false;
1230
1231 /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
1232 or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
1233 is always passed/returned in the least significant bits of fp/simd
1234 register(s). */
1235 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
1236 &dummy_mode, &dummy_int, NULL))
1237 return false;
1238
1239 return true;
1240 }
1241
1242 /* Implement TARGET_FUNCTION_VALUE.
1243 Define how to find the value returned by a function. */
1244
1245 static rtx
1246 aarch64_function_value (const_tree type, const_tree func,
1247 bool outgoing ATTRIBUTE_UNUSED)
1248 {
1249 enum machine_mode mode;
1250 int unsignedp;
1251 int count;
1252 enum machine_mode ag_mode;
1253
1254 mode = TYPE_MODE (type);
1255 if (INTEGRAL_TYPE_P (type))
1256 mode = promote_function_mode (type, mode, &unsignedp, func, 1);
1257
1258 if (aarch64_return_in_msb (type))
1259 {
1260 HOST_WIDE_INT size = int_size_in_bytes (type);
1261
1262 if (size % UNITS_PER_WORD != 0)
1263 {
1264 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
1265 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
1266 }
1267 }
1268
1269 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
1270 &ag_mode, &count, NULL))
1271 {
1272 if (!aarch64_composite_type_p (type, mode))
1273 {
1274 gcc_assert (count == 1 && mode == ag_mode);
1275 return gen_rtx_REG (mode, V0_REGNUM);
1276 }
1277 else
1278 {
1279 int i;
1280 rtx par;
1281
1282 par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
1283 for (i = 0; i < count; i++)
1284 {
1285 rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
1286 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
1287 GEN_INT (i * GET_MODE_SIZE (ag_mode)));
1288 XVECEXP (par, 0, i) = tmp;
1289 }
1290 return par;
1291 }
1292 }
1293 else
1294 return gen_rtx_REG (mode, R0_REGNUM);
1295 }
1296
1297 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.
1298 Return true if REGNO is the number of a hard register in which the values
1299 of called function may come back. */
1300
1301 static bool
1302 aarch64_function_value_regno_p (const unsigned int regno)
1303 {
1304 /* Maximum of 16 bytes can be returned in the general registers. Examples
1305 of 16-byte return values are: 128-bit integers and 16-byte small
1306 structures (excluding homogeneous floating-point aggregates). */
1307 if (regno == R0_REGNUM || regno == R1_REGNUM)
1308 return true;
1309
1310 /* Up to four fp/simd registers can return a function value, e.g. a
1311 homogeneous floating-point aggregate having four members. */
1312 if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
1313 return !TARGET_GENERAL_REGS_ONLY;
1314
1315 return false;
1316 }
1317
1318 /* Implement TARGET_RETURN_IN_MEMORY.
1319
1320 If the type T of the result of a function is such that
1321 void func (T arg)
1322 would require that arg be passed as a value in a register (or set of
1323 registers) according to the parameter passing rules, then the result
1324 is returned in the same registers as would be used for such an
1325 argument. */
1326
1327 static bool
1328 aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
1329 {
1330 HOST_WIDE_INT size;
1331 enum machine_mode ag_mode;
1332 int count;
1333
1334 if (!AGGREGATE_TYPE_P (type)
1335 && TREE_CODE (type) != COMPLEX_TYPE
1336 && TREE_CODE (type) != VECTOR_TYPE)
1337 /* Simple scalar types always returned in registers. */
1338 return false;
1339
1340 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
1341 type,
1342 &ag_mode,
1343 &count,
1344 NULL))
1345 return false;
1346
1347 /* Types larger than 2 registers returned in memory. */
1348 size = int_size_in_bytes (type);
1349 return (size < 0 || size > 2 * UNITS_PER_WORD);
1350 }
1351
1352 static bool
1353 aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
1354 const_tree type, int *nregs)
1355 {
1356 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1357 return aarch64_vfp_is_call_or_return_candidate (mode,
1358 type,
1359 &pcum->aapcs_vfp_rmode,
1360 nregs,
1361 NULL);
1362 }
1363
1364 /* Given MODE and TYPE of a function argument, return the alignment in
1365 bits. The idea is to suppress any stronger alignment requested by
1366 the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
1367 This is a helper function for local use only. */
1368
1369 static unsigned int
1370 aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
1371 {
1372 unsigned int alignment;
1373
1374 if (type)
1375 {
1376 if (!integer_zerop (TYPE_SIZE (type)))
1377 {
1378 if (TYPE_MODE (type) == mode)
1379 alignment = TYPE_ALIGN (type);
1380 else
1381 alignment = GET_MODE_ALIGNMENT (mode);
1382 }
1383 else
1384 alignment = 0;
1385 }
1386 else
1387 alignment = GET_MODE_ALIGNMENT (mode);
1388
1389 return alignment;
1390 }
1391
1392 /* Layout a function argument according to the AAPCS64 rules. The rule
1393 numbers refer to the rule numbers in the AAPCS64. */
1394
1395 static void
1396 aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
1397 const_tree type,
1398 bool named ATTRIBUTE_UNUSED)
1399 {
1400 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1401 int ncrn, nvrn, nregs;
1402 bool allocate_ncrn, allocate_nvrn;
1403
1404 /* We need to do this once per argument. */
1405 if (pcum->aapcs_arg_processed)
1406 return;
1407
1408 pcum->aapcs_arg_processed = true;
1409
1410 allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
1411 allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
1412 mode,
1413 type,
1414 &nregs);
1415
1416 /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
1417 The following code thus handles passing by SIMD/FP registers first. */
1418
1419 nvrn = pcum->aapcs_nvrn;
1420
1421 /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
1422 and homogeneous short-vector aggregates (HVA). */
1423 if (allocate_nvrn)
1424 {
1425 if (nvrn + nregs <= NUM_FP_ARG_REGS)
1426 {
1427 pcum->aapcs_nextnvrn = nvrn + nregs;
1428 if (!aarch64_composite_type_p (type, mode))
1429 {
1430 gcc_assert (nregs == 1);
1431 pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
1432 }
1433 else
1434 {
1435 rtx par;
1436 int i;
1437 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
1438 for (i = 0; i < nregs; i++)
1439 {
1440 rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
1441 V0_REGNUM + nvrn + i);
1442 tmp = gen_rtx_EXPR_LIST
1443 (VOIDmode, tmp,
1444 GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
1445 XVECEXP (par, 0, i) = tmp;
1446 }
1447 pcum->aapcs_reg = par;
1448 }
1449 return;
1450 }
1451 else
1452 {
1453 /* C.3 NSRN is set to 8. */
1454 pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
1455 goto on_stack;
1456 }
1457 }
1458
1459 ncrn = pcum->aapcs_ncrn;
1460 nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode))
1461 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1462
1463
1464 /* C6 - C9, though the sign and zero extension semantics are
1465 handled elsewhere.  This is the case where the argument fits
1466 entirely in general registers. */
1467 if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
1468 {
1469 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
1470
1471 gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
1472
1473 /* C.8: if the argument has an alignment of 16 bytes, then the NGRN is
1474 rounded up to the next even number. */
1475 if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
1476 {
1477 ++ncrn;
1478 gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
1479 }
1480 /* NREGS can be 0 when e.g. an empty structure is to be passed.
1481 A reg is still generated for it, but the caller should be smart
1482 enough not to use it. */
1483 if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
1484 {
1485 pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
1486 }
1487 else
1488 {
1489 rtx par;
1490 int i;
1491
1492 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
1493 for (i = 0; i < nregs; i++)
1494 {
1495 rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
1496 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
1497 GEN_INT (i * UNITS_PER_WORD));
1498 XVECEXP (par, 0, i) = tmp;
1499 }
1500 pcum->aapcs_reg = par;
1501 }
1502
1503 pcum->aapcs_nextncrn = ncrn + nregs;
1504 return;
1505 }
1506
1507 /* C.11 */
1508 pcum->aapcs_nextncrn = NUM_ARG_REGS;
1509
1510 /* The argument is passed on the stack; record the needed number of words for
1511 this argument (we can re-use NREGS) and align the total size if
1512 necessary. */
1513 on_stack:
1514 pcum->aapcs_stack_words = nregs;
1515 if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
1516 pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
1517 16 / UNITS_PER_WORD) + 1;
1518 return;
1519 }
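/* Worked example added for exposition: a 16-byte, 16-byte-aligned integer
   argument such as a TImode __int128 needs nregs == 2.  If the next core
   register number is odd (say x1 is next), rule C.8 above bumps ncrn to the
   next even number so the value is passed in an even/odd pair such as
   x2/x3.  */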
1520
1521 /* Implement TARGET_FUNCTION_ARG. */
1522
1523 static rtx
1524 aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
1525 const_tree type, bool named)
1526 {
1527 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1528 gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
1529
1530 if (mode == VOIDmode)
1531 return NULL_RTX;
1532
1533 aarch64_layout_arg (pcum_v, mode, type, named);
1534 return pcum->aapcs_reg;
1535 }
1536
1537 void
1538 aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
1539 const_tree fntype ATTRIBUTE_UNUSED,
1540 rtx libname ATTRIBUTE_UNUSED,
1541 const_tree fndecl ATTRIBUTE_UNUSED,
1542 unsigned n_named ATTRIBUTE_UNUSED)
1543 {
1544 pcum->aapcs_ncrn = 0;
1545 pcum->aapcs_nvrn = 0;
1546 pcum->aapcs_nextncrn = 0;
1547 pcum->aapcs_nextnvrn = 0;
1548 pcum->pcs_variant = ARM_PCS_AAPCS64;
1549 pcum->aapcs_reg = NULL_RTX;
1550 pcum->aapcs_arg_processed = false;
1551 pcum->aapcs_stack_words = 0;
1552 pcum->aapcs_stack_size = 0;
1553
1554 return;
1555 }
1556
1557 static void
1558 aarch64_function_arg_advance (cumulative_args_t pcum_v,
1559 enum machine_mode mode,
1560 const_tree type,
1561 bool named)
1562 {
1563 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1564 if (pcum->pcs_variant == ARM_PCS_AAPCS64)
1565 {
1566 aarch64_layout_arg (pcum_v, mode, type, named);
1567 gcc_assert ((pcum->aapcs_reg != NULL_RTX)
1568 != (pcum->aapcs_stack_words != 0));
1569 pcum->aapcs_arg_processed = false;
1570 pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
1571 pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
1572 pcum->aapcs_stack_size += pcum->aapcs_stack_words;
1573 pcum->aapcs_stack_words = 0;
1574 pcum->aapcs_reg = NULL_RTX;
1575 }
1576 }
1577
1578 bool
1579 aarch64_function_arg_regno_p (unsigned regno)
1580 {
1581 return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
1582 || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
1583 }
1584
1585 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
1586 PARM_BOUNDARY bits of alignment, but will be given anything up
1587 to STACK_BOUNDARY bits if the type requires it. This makes sure
1588 that both before and after the layout of each argument, the Next
1589 Stacked Argument Address (NSAA) will have a minimum alignment of
1590 8 bytes. */
1591
1592 static unsigned int
1593 aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
1594 {
1595 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
1596
1597 if (alignment < PARM_BOUNDARY)
1598 alignment = PARM_BOUNDARY;
1599 if (alignment > STACK_BOUNDARY)
1600 alignment = STACK_BOUNDARY;
1601 return alignment;
1602 }
1603
1604 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
1605
1606 Return true if an argument passed on the stack should be padded upwards,
1607 i.e. if the least-significant byte of the stack slot has useful data.
1608
1609 Small aggregate types are placed in the lowest memory address.
1610
1611 The related parameter passing rules are B.4, C.3, C.5 and C.14. */
1612
1613 bool
1614 aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
1615 {
1616 /* On little-endian targets, the least significant byte of every stack
1617 argument is passed at the lowest byte address of the stack slot. */
1618 if (!BYTES_BIG_ENDIAN)
1619 return true;
1620
1621 /* Otherwise, integral, floating-point and pointer types are padded downward:
1622 the least significant byte of a stack argument is passed at the highest
1623 byte address of the stack slot. */
1624 if (type
1625 ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)
1626 || POINTER_TYPE_P (type))
1627 : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
1628 return false;
1629
1630 /* Everything else padded upward, i.e. data in first byte of stack slot. */
1631 return true;
1632 }
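/* Illustrative example added for exposition: on a big-endian target, a
   single 'char' argument that ends up on the stack is padded downward, so
   its byte sits at the highest address of its stack slot, whereas a small
   aggregate such as a 3-byte structure is padded upward and starts at the
   lowest address of the slot.  */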
1633
1634 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
1635
1636 It specifies padding for the last (and possibly the only)
1637 element of a block move between registers and memory.  Assuming
1638 the block is in memory, padding upward means that the last
1639 element is padded after its most significant byte, while with
1640 downward padding the last element is padded on its least
1641 significant byte side.
1642
1643 Small aggregates and small complex types are always padded
1644 upwards.
1645
1646 We don't need to worry about homogeneous floating-point or
1647 short-vector aggregates; their move is not affected by the
1648 padding direction determined here. Regardless of endianness,
1649 each element of such an aggregate is put in the least
1650 significant bits of an fp/simd register.
1651
1652 Return !BYTES_BIG_ENDIAN if the least significant byte of the
1653 register has useful data, and return the opposite if the most
1654 significant byte does. */
1655
1656 bool
1657 aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
1658 bool first ATTRIBUTE_UNUSED)
1659 {
1660
1661 /* Small composite types are always padded upward. */
1662 if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
1663 {
1664 HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
1665 : GET_MODE_SIZE (mode));
1666 if (size < 2 * UNITS_PER_WORD)
1667 return true;
1668 }
1669
1670 /* Otherwise, use the default padding. */
1671 return !BYTES_BIG_ENDIAN;
1672 }
1673
1674 static enum machine_mode
1675 aarch64_libgcc_cmp_return_mode (void)
1676 {
1677 return SImode;
1678 }
1679
1680 static bool
1681 aarch64_frame_pointer_required (void)
1682 {
1683 /* If the function contains dynamic stack allocations, we need to
1684 use the frame pointer to access the static parts of the frame. */
1685 if (cfun->calls_alloca)
1686 return true;
1687
1688 /* We may have turned flag_omit_frame_pointer on in order to have this
1689 function called; if we did, we also set the 'faked_omit_frame_pointer' flag
1690 and we'll check it here.
1691 If flag_omit_frame_pointer was set normally (not faked), then we return false
1692 (no frame pointer required) in all cases. */
1693
1694 if (flag_omit_frame_pointer && !faked_omit_frame_pointer)
1695 return false;
1696 else if (flag_omit_leaf_frame_pointer)
1697 return !crtl->is_leaf;
1698 return true;
1699 }
1700
1701 /* Mark the registers that need to be saved by the callee and calculate
1702 the size of the callee-saved registers area and frame record (both FP
1703 and LR may be omitted). */
1704 static void
1705 aarch64_layout_frame (void)
1706 {
1707 HOST_WIDE_INT offset = 0;
1708 int regno;
1709
1710 if (reload_completed && cfun->machine->frame.laid_out)
1711 return;
1712
1713 cfun->machine->frame.fp_lr_offset = 0;
1714
1715 /* First mark all the registers that really need to be saved... */
1716 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
1717 cfun->machine->frame.reg_offset[regno] = -1;
1718
1719 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1720 cfun->machine->frame.reg_offset[regno] = -1;
1721
1722 /* ... that includes the eh data registers (if needed)... */
1723 if (crtl->calls_eh_return)
1724 for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
1725 cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0;
1726
1727 /* ... and any callee saved register that dataflow says is live. */
1728 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
1729 if (df_regs_ever_live_p (regno)
1730 && !call_used_regs[regno])
1731 cfun->machine->frame.reg_offset[regno] = 0;
1732
1733 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1734 if (df_regs_ever_live_p (regno)
1735 && !call_used_regs[regno])
1736 cfun->machine->frame.reg_offset[regno] = 0;
1737
1738 if (frame_pointer_needed)
1739 {
1740 cfun->machine->frame.reg_offset[R30_REGNUM] = 0;
1741 cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
1742 cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
1743 }
1744
1745 /* Now assign stack slots for them. */
1746 for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++)
1747 if (cfun->machine->frame.reg_offset[regno] != -1)
1748 {
1749 cfun->machine->frame.reg_offset[regno] = offset;
1750 offset += UNITS_PER_WORD;
1751 }
1752
1753 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1754 if (cfun->machine->frame.reg_offset[regno] != -1)
1755 {
1756 cfun->machine->frame.reg_offset[regno] = offset;
1757 offset += UNITS_PER_WORD;
1758 }
1759
1760 if (frame_pointer_needed)
1761 {
1762 cfun->machine->frame.reg_offset[R29_REGNUM] = offset;
1763 offset += UNITS_PER_WORD;
1764 cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD;
1765 }
1766
1767 if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1)
1768 {
1769 cfun->machine->frame.reg_offset[R30_REGNUM] = offset;
1770 offset += UNITS_PER_WORD;
1771 cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD;
1772 }
1773
1774 cfun->machine->frame.padding0 =
1775 (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
1776 offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
1777
1778 cfun->machine->frame.saved_regs_size = offset;
1779 cfun->machine->frame.laid_out = true;
1780 }
1781
1782 /* Make the last instruction frame-related and note that it performs
1783 the operation described by FRAME_PATTERN. */
1784
1785 static void
1786 aarch64_set_frame_expr (rtx frame_pattern)
1787 {
1788 rtx insn;
1789
1790 insn = get_last_insn ();
1791 RTX_FRAME_RELATED_P (insn) = 1;
1792 RTX_FRAME_RELATED_P (frame_pattern) = 1;
1793 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
1794 frame_pattern,
1795 REG_NOTES (insn));
1796 }
1797
1798 static bool
1799 aarch64_register_saved_on_entry (int regno)
1800 {
1801 return cfun->machine->frame.reg_offset[regno] != -1;
1802 }
1803
1804
1805 static void
1806 aarch64_save_or_restore_fprs (int start_offset, int increment,
1807 bool restore, rtx base_rtx)
1808
1809 {
1810 unsigned regno;
1811 unsigned regno2;
1812 rtx insn;
1813 rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
1814
1815
1816 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1817 {
1818 if (aarch64_register_saved_on_entry (regno))
1819 {
1820 rtx mem;
1821 mem = gen_mem_ref (DFmode,
1822 plus_constant (Pmode,
1823 base_rtx,
1824 start_offset));
1825
1826 for (regno2 = regno + 1;
1827 regno2 <= V31_REGNUM
1828 && !aarch64_register_saved_on_entry (regno2);
1829 regno2++)
1830 {
1831 /* Empty loop. */
1832 }
1833 if (regno2 <= V31_REGNUM &&
1834 aarch64_register_saved_on_entry (regno2))
1835 {
1836 rtx mem2;
1837 /* Next highest register to be saved. */
1838 mem2 = gen_mem_ref (DFmode,
1839 plus_constant
1840 (Pmode,
1841 base_rtx,
1842 start_offset + increment));
1843 if (restore == false)
1844 {
1845 insn = emit_insn
1846 ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
1847 mem2, gen_rtx_REG (DFmode, regno2)));
1848
1849 }
1850 else
1851 {
1852 insn = emit_insn
1853 ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
1854 gen_rtx_REG (DFmode, regno2), mem2));
1855
1856 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
1857 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
1858 }
1859
1860 /* The first part of a frame-related parallel insn
1861 is always assumed to be relevant to the frame
1862 calculations; subsequent parts are only
1863 frame-related if explicitly marked. */
1864 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
1865 1)) = 1;
1866 regno = regno2;
1867 start_offset += increment * 2;
1868 }
1869 else
1870 {
1871 if (restore == false)
1872 insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
1873 else
1874 {
1875 insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
1876 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
1877 }
1878 start_offset += increment;
1879 }
1880 RTX_FRAME_RELATED_P (insn) = 1;
1881 }
1882 }
1883
1884 }
1885
1886
1887 /* Offset from the stack pointer at which the saves and
1888 restores have to happen. */
1889 static void
1890 aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
1891 bool restore)
1892 {
1893 rtx insn;
1894 rtx base_rtx = stack_pointer_rtx;
1895 HOST_WIDE_INT start_offset = offset;
1896 HOST_WIDE_INT increment = UNITS_PER_WORD;
1897 rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
1898 unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM;
1899 unsigned regno;
1900 unsigned regno2;
1901
1902 for (regno = R0_REGNUM; regno <= limit; regno++)
1903 {
1904 if (aarch64_register_saved_on_entry (regno))
1905 {
1906 rtx mem;
1907 mem = gen_mem_ref (Pmode,
1908 plus_constant (Pmode,
1909 base_rtx,
1910 start_offset));
1911
1912 for (regno2 = regno + 1;
1913 regno2 <= limit
1914 && !aarch64_register_saved_on_entry (regno2);
1915 regno2++)
1916 {
1917 /* Empty loop. */
1918 }
1919 if (regno2 <= limit &&
1920 aarch64_register_saved_on_entry (regno2))
1921 {
1922 rtx mem2;
1923 /* Next highest register to be saved. */
1924 mem2 = gen_mem_ref (Pmode,
1925 plus_constant
1926 (Pmode,
1927 base_rtx,
1928 start_offset + increment));
1929 if (restore == false)
1930 {
1931 insn = emit_insn
1932 ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
1933 mem2, gen_rtx_REG (DImode, regno2)));
1934
1935 }
1936 else
1937 {
1938 insn = emit_insn
1939 ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
1940 gen_rtx_REG (DImode, regno2), mem2));
1941
1942 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
1943 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
1944 }
1945
1946 /* The first part of a frame-related parallel insn
1947 is always assumed to be relevant to the frame
1948 calculations; subsequent parts are only
1949 frame-related if explicitly marked. */
1950 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
1951 1)) = 1;
1952 regno = regno2;
1953 start_offset += increment * 2;
1954 }
1955 else
1956 {
1957 if (restore == false)
1958 insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
1959 else
1960 {
1961 insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
1962 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
1963 }
1964 start_offset += increment;
1965 }
1966 RTX_FRAME_RELATED_P (insn) = 1;
1967 }
1968 }
1969
1970 aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
1971
1972 }
1973
1974 /* AArch64 stack frames generated by this compiler look like:
1975
1976 +-------------------------------+
1977 | |
1978 | incoming stack arguments |
1979 | |
1980 +-------------------------------+ <-- arg_pointer_rtx
1981 | |
1982 | callee-allocated save area |
1983 | for register varargs |
1984 | |
1985 +-------------------------------+
1986 | |
1987 | local variables |
1988 | |
1989 +-------------------------------+ <-- frame_pointer_rtx
1990 | |
1991 | callee-saved registers |
1992 | |
1993 +-------------------------------+
1994 | LR' |
1995 +-------------------------------+
1996 | FP' |
1997 P +-------------------------------+ <-- hard_frame_pointer_rtx
1998 | dynamic allocation |
1999 +-------------------------------+
2000 | |
2001 | outgoing stack arguments |
2002 | |
2003 +-------------------------------+ <-- stack_pointer_rtx
2004
2005 Dynamic stack allocations such as alloca insert data at point P.
2006 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
2007 hard_frame_pointer_rtx unchanged. */
2008
2009 /* Generate the prologue instructions for entry into a function.
2010 Establish the stack frame by decreasing the stack pointer with a
2011 properly calculated size and, if necessary, create a frame record
2012 filled with the values of LR and previous frame pointer. The
2013 current FP is also set up if it is in use. */
2014
2015 void
2016 aarch64_expand_prologue (void)
2017 {
2018 /* sub sp, sp, #<frame_size>
2019 stp {fp, lr}, [sp, #<frame_size> - 16]
2020 add fp, sp, #<frame_size> - hardfp_offset
2021 stp {cs_reg}, [fp, #-16] etc.
2022
2023 sub sp, sp, <final_adjustment_if_any>
2024 */
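/* As an illustration only (the exact offsets depend on the layout
   computed by aarch64_layout_frame), a small function that needs a
   frame pointer might instead start with

   stp x29, x30, [sp, #-32]!
   add x29, sp, #0

   followed by stores of the remaining callee-saved registers.  */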
2025 HOST_WIDE_INT original_frame_size; /* local variables + vararg save */
2026 HOST_WIDE_INT frame_size, offset;
2027 HOST_WIDE_INT fp_offset; /* FP offset from SP */
2028 rtx insn;
2029
2030 aarch64_layout_frame ();
2031 original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
2032 gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg)
2033 && (cfun->stdarg || !cfun->machine->saved_varargs_size));
2034 frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
2035 + crtl->outgoing_args_size);
2036 offset = frame_size = AARCH64_ROUND_UP (frame_size,
2037 STACK_BOUNDARY / BITS_PER_UNIT);
2038
2039 if (flag_stack_usage_info)
2040 current_function_static_stack_size = frame_size;
2041
2042 fp_offset = (offset
2043 - original_frame_size
2044 - cfun->machine->frame.saved_regs_size);
2045
2046 /* Store pairs and load pairs have a range only -512 to 504. */
2047 if (offset >= 512)
2048 {
2049 /* When the frame has a large size, an initial decrease is done on
2050 the stack pointer to jump over the callee-allocated save area for
2051 register varargs, the local variable area and/or the callee-saved
2052 register area. This will allow the pre-index write-back
2053 store pair instructions to be used for setting up the stack frame
2054 efficiently. */
2055 offset = original_frame_size + cfun->machine->frame.saved_regs_size;
2056 if (offset >= 512)
2057 offset = cfun->machine->frame.saved_regs_size;
2058
2059 frame_size -= (offset + crtl->outgoing_args_size);
2060 fp_offset = 0;
2061
2062 if (frame_size >= 0x1000000)
2063 {
2064 rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2065 emit_move_insn (op0, GEN_INT (-frame_size));
2066 emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
2067 aarch64_set_frame_expr (gen_rtx_SET
2068 (Pmode, stack_pointer_rtx,
2069 plus_constant (Pmode,
2070 stack_pointer_rtx,
2071 -frame_size)));
2072 }
2073 else if (frame_size > 0)
2074 {
2075 if ((frame_size & 0xfff) != frame_size)
2076 {
2077 insn = emit_insn (gen_add2_insn
2078 (stack_pointer_rtx,
2079 GEN_INT (-(frame_size
2080 & ~(HOST_WIDE_INT)0xfff))));
2081 RTX_FRAME_RELATED_P (insn) = 1;
2082 }
2083 if ((frame_size & 0xfff) != 0)
2084 {
2085 insn = emit_insn (gen_add2_insn
2086 (stack_pointer_rtx,
2087 GEN_INT (-(frame_size
2088 & (HOST_WIDE_INT)0xfff))));
2089 RTX_FRAME_RELATED_P (insn) = 1;
2090 }
2091 }
2092 }
2093 else
2094 frame_size = -1;
2095
2096 if (offset > 0)
2097 {
2098 /* If the frame pointer is needed, save the frame pointer and lr
2099 first. Make the frame pointer point to the location of the
2100 old frame pointer on the stack. */
2101 if (frame_pointer_needed)
2102 {
2103 rtx mem_fp, mem_lr;
2104
2105 if (fp_offset)
2106 {
2107 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2108 GEN_INT (-offset)));
2109 RTX_FRAME_RELATED_P (insn) = 1;
2110 aarch64_set_frame_expr (gen_rtx_SET
2111 (Pmode, stack_pointer_rtx,
2112 gen_rtx_MINUS (Pmode,
2113 stack_pointer_rtx,
2114 GEN_INT (offset))));
2115 mem_fp = gen_frame_mem (DImode,
2116 plus_constant (Pmode,
2117 stack_pointer_rtx,
2118 fp_offset));
2119 mem_lr = gen_frame_mem (DImode,
2120 plus_constant (Pmode,
2121 stack_pointer_rtx,
2122 fp_offset
2123 + UNITS_PER_WORD));
2124 insn = emit_insn (gen_store_pairdi (mem_fp,
2125 hard_frame_pointer_rtx,
2126 mem_lr,
2127 gen_rtx_REG (DImode,
2128 LR_REGNUM)));
2129 }
2130 else
2131 {
2132 insn = emit_insn (gen_storewb_pairdi_di
2133 (stack_pointer_rtx, stack_pointer_rtx,
2134 hard_frame_pointer_rtx,
2135 gen_rtx_REG (DImode, LR_REGNUM),
2136 GEN_INT (-offset),
2137 GEN_INT (GET_MODE_SIZE (DImode) - offset)));
2138 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
2139 }
2140
2141 /* The first part of a frame-related parallel insn is always
2142 assumed to be relevant to the frame calculations;
2143 subsequent parts are only frame-related if explicitly
2144 marked. */
2145 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2146 RTX_FRAME_RELATED_P (insn) = 1;
2147
2148 /* Set up frame pointer to point to the location of the
2149 previous frame pointer on the stack. */
2150 insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
2151 stack_pointer_rtx,
2152 GEN_INT (fp_offset)));
2153 aarch64_set_frame_expr (gen_rtx_SET
2154 (Pmode, hard_frame_pointer_rtx,
2155 plus_constant (Pmode,
2156 stack_pointer_rtx,
2157 fp_offset)));
2158 RTX_FRAME_RELATED_P (insn) = 1;
2159 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
2160 hard_frame_pointer_rtx));
2161 }
2162 else
2163 {
2164 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2165 GEN_INT (-offset)));
2166 RTX_FRAME_RELATED_P (insn) = 1;
2167 }
2168
2169 aarch64_save_or_restore_callee_save_registers
2170 (fp_offset + cfun->machine->frame.hardfp_offset, 0);
2171 }
2172
2173 /* When the initial adjustment was split (offset >= 512), allocate the
2174 outgoing argument area: sub sp, sp, #<outgoing_args_size>. */
2175 if (frame_size > -1)
2176 {
2177 if (crtl->outgoing_args_size > 0)
2178 {
2179 insn = emit_insn (gen_add2_insn
2180 (stack_pointer_rtx,
2181 GEN_INT (- crtl->outgoing_args_size)));
2182 RTX_FRAME_RELATED_P (insn) = 1;
2183 }
2184 }
2185 }
2186
2187 /* Generate the epilogue instructions for returning from a function. */
2188 void
2189 aarch64_expand_epilogue (bool for_sibcall)
2190 {
2191 HOST_WIDE_INT original_frame_size, frame_size, offset;
2192 HOST_WIDE_INT fp_offset;
2193 rtx insn;
2194 rtx cfa_reg;
2195
2196 aarch64_layout_frame ();
2197 original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
2198 frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
2199 + crtl->outgoing_args_size);
2200 offset = frame_size = AARCH64_ROUND_UP (frame_size,
2201 STACK_BOUNDARY / BITS_PER_UNIT);
2202
2203 fp_offset = (offset
2204 - original_frame_size
2205 - cfun->machine->frame.saved_regs_size);
2206
2207 cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
2208
2209 /* Store pairs and load pairs have a range only -512 to 504. */
2210 if (offset >= 512)
2211 {
2212 offset = original_frame_size + cfun->machine->frame.saved_regs_size;
2213 if (offset >= 512)
2214 offset = cfun->machine->frame.saved_regs_size;
2215
2216 frame_size -= (offset + crtl->outgoing_args_size);
2217 fp_offset = 0;
2218 if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
2219 {
2220 insn = emit_insn (gen_add2_insn
2221 (stack_pointer_rtx,
2222 GEN_INT (crtl->outgoing_args_size)));
2223 RTX_FRAME_RELATED_P (insn) = 1;
2224 }
2225 }
2226 else
2227 frame_size = -1;
2228
2229 /* If there were outgoing arguments or we've done dynamic stack
2230 allocation, then restore the stack pointer from the frame
2231 pointer. This is at most one insn and more efficient than using
2232 GCC's internal mechanism. */
2233 if (frame_pointer_needed
2234 && (crtl->outgoing_args_size || cfun->calls_alloca))
2235 {
2236 insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
2237 hard_frame_pointer_rtx,
2238 GEN_INT (- fp_offset)));
2239 RTX_FRAME_RELATED_P (insn) = 1;
2240 /* As SP is set to (FP - fp_offset), according to the rules in
2241 dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
2242 from the value of SP from now on. */
2243 cfa_reg = stack_pointer_rtx;
2244 }
2245
2246 aarch64_save_or_restore_callee_save_registers
2247 (fp_offset + cfun->machine->frame.hardfp_offset, 1);
2248
2249 /* Restore the frame pointer and lr if the frame pointer is needed. */
2250 if (offset > 0)
2251 {
2252 if (frame_pointer_needed)
2253 {
2254 rtx mem_fp, mem_lr;
2255
2256 if (fp_offset)
2257 {
2258 mem_fp = gen_frame_mem (DImode,
2259 plus_constant (Pmode,
2260 stack_pointer_rtx,
2261 fp_offset));
2262 mem_lr = gen_frame_mem (DImode,
2263 plus_constant (Pmode,
2264 stack_pointer_rtx,
2265 fp_offset
2266 + UNITS_PER_WORD));
2267 insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
2268 mem_fp,
2269 gen_rtx_REG (DImode,
2270 LR_REGNUM),
2271 mem_lr));
2272 }
2273 else
2274 {
2275 insn = emit_insn (gen_loadwb_pairdi_di
2276 (stack_pointer_rtx,
2277 stack_pointer_rtx,
2278 hard_frame_pointer_rtx,
2279 gen_rtx_REG (DImode, LR_REGNUM),
2280 GEN_INT (offset),
2281 GEN_INT (GET_MODE_SIZE (DImode) + offset)));
2282 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
2283 add_reg_note (insn, REG_CFA_ADJUST_CFA,
2284 (gen_rtx_SET (Pmode, stack_pointer_rtx,
2285 plus_constant (Pmode, cfa_reg,
2286 offset))));
2287 }
2288
2289 /* The first part of a frame-related parallel insn
2290 is always assumed to be relevant to the frame
2291 calculations; subsequent parts are only
2292 frame-related if explicitly marked. */
2293 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2294 RTX_FRAME_RELATED_P (insn) = 1;
2295 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
2296 add_reg_note (insn, REG_CFA_RESTORE,
2297 gen_rtx_REG (DImode, LR_REGNUM));
2298
2299 if (fp_offset)
2300 {
2301 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2302 GEN_INT (offset)));
2303 RTX_FRAME_RELATED_P (insn) = 1;
2304 }
2305 }
2306 else
2307 {
2308 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2309 GEN_INT (offset)));
2310 RTX_FRAME_RELATED_P (insn) = 1;
2311 }
2312 }
2313
2314 /* Stack adjustment for exception handler. */
2315 if (crtl->calls_eh_return)
2316 {
2317 /* We need to unwind the stack by the offset computed by
2318 EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
2319 based on SP. Ideally we would update the SP and define the
2320 CFA along the lines of:
2321
2322 SP = SP + EH_RETURN_STACKADJ_RTX
2323 (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
2324
2325 However the dwarf emitter only understands a constant
2326 register offset.
2327
2328 The solution chosen here is to use the otherwise unused IP0
2329 as a temporary register to hold the current SP value. The
2330 CFA is described using IP0 then SP is modified. */
2331
2332 rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
2333
2334 insn = emit_move_insn (ip0, stack_pointer_rtx);
2335 add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
2336 RTX_FRAME_RELATED_P (insn) = 1;
2337
2338 emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
2339
2340 /* Ensure the assignment to IP0 does not get optimized away. */
2341 emit_use (ip0);
2342 }
2343
2344 if (frame_size > -1)
2345 {
2346 if (frame_size >= 0x1000000)
2347 {
2348 rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2349 emit_move_insn (op0, GEN_INT (frame_size));
2350 emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
2351 aarch64_set_frame_expr (gen_rtx_SET
2352 (Pmode, stack_pointer_rtx,
2353 plus_constant (Pmode,
2354 stack_pointer_rtx,
2355 frame_size)));
2356 }
2357 else if (frame_size > 0)
2358 {
2359 if ((frame_size & 0xfff) != 0)
2360 {
2361 insn = emit_insn (gen_add2_insn
2362 (stack_pointer_rtx,
2363 GEN_INT ((frame_size
2364 & (HOST_WIDE_INT) 0xfff))));
2365 RTX_FRAME_RELATED_P (insn) = 1;
2366 }
2367 if ((frame_size & 0xfff) != frame_size)
2368 {
2369 insn = emit_insn (gen_add2_insn
2370 (stack_pointer_rtx,
2371 GEN_INT ((frame_size
2372 & ~ (HOST_WIDE_INT) 0xfff))));
2373 RTX_FRAME_RELATED_P (insn) = 1;
2374 }
2375 }
2376
2377 aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
2378 plus_constant (Pmode,
2379 stack_pointer_rtx,
2380 offset)));
2381 }
2382
2383 emit_use (gen_rtx_REG (DImode, LR_REGNUM));
2384 if (!for_sibcall)
2385 emit_jump_insn (ret_rtx);
2386 }
2387
2388 /* Return the place to copy the exception unwinding return address to.
2389 This will probably be a stack slot, but could (in theory) be the
2390 return register. */
2391 rtx
2392 aarch64_final_eh_return_addr (void)
2393 {
2394 HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset;
2395 aarch64_layout_frame ();
2396 original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
2397 frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
2398 + crtl->outgoing_args_size);
2399 offset = frame_size = AARCH64_ROUND_UP (frame_size,
2400 STACK_BOUNDARY / BITS_PER_UNIT);
2401 fp_offset = offset
2402 - original_frame_size
2403 - cfun->machine->frame.saved_regs_size;
2404
2405 if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
2406 return gen_rtx_REG (DImode, LR_REGNUM);
2407
2408 /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
2409 result in a store to save LR introduced by builtin_eh_return () being
2410 incorrectly deleted because the alias is not detected.
2411 So in the calculation of the address to copy the exception unwinding
2412 return address to, we note 2 cases.
2413 If FP is needed and the fp_offset is 0, it means that SP = FP and hence
2414 we return a SP-relative location since all the addresses are SP-relative
2415 in this case. This prevents the store from being optimized away.
2416 If the fp_offset is not 0, then the addresses will be FP-relative and
2417 therefore we return a FP-relative location. */
2418
2419 if (frame_pointer_needed)
2420 {
2421 if (fp_offset)
2422 return gen_frame_mem (DImode,
2423 plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
2424 else
2425 return gen_frame_mem (DImode,
2426 plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
2427 }
2428
2429 /* If FP is not needed, we calculate the location of LR, which would be
2430 at the top of the saved registers block. */
2431
2432 return gen_frame_mem (DImode,
2433 plus_constant (Pmode,
2434 stack_pointer_rtx,
2435 fp_offset
2436 + cfun->machine->frame.saved_regs_size
2437 - 2 * UNITS_PER_WORD));
2438 }
2439
2440 /* Output code to build up constant VAL in the register numbered REGNUM. */
2441 static void
2442 aarch64_build_constant (int regnum, HOST_WIDE_INT val)
2443 {
2444 if (aarch64_bitmask_imm (val, DImode))
2445 emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val));
2446 else
2447 {
2448 int i;
2449 int ncount = 0;
2450 int zcount = 0;
2451 HOST_WIDE_INT valp = val >> 16;
2452 HOST_WIDE_INT valm;
2453 HOST_WIDE_INT tval;
2454
2455 for (i = 16; i < 64; i += 16)
2456 {
2457 valm = (valp & 0xffff);
2458
2459 if (valm != 0)
2460 ++ zcount;
2461
2462 if (valm != 0xffff)
2463 ++ ncount;
2464
2465 valp >>= 16;
2466 }
2467
2468 /* zcount contains the number of additional MOVK instructions
2469 required if the constant is built up with an initial MOVZ instruction,
2470 while ncount is the number of MOVK instructions required if starting
2471 with a MOVN instruction. Choose the sequence that yields the fewer
2472 instructions, preferring the MOVZ-based sequence when the two counts
2473 are equal. */
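/* For example (an illustrative value, not taken from elsewhere in this
   file): 0xffffffffffff1234 has all three upper 16-bit chunks equal to
   0xffff, so ncount == 0 and zcount == 3; the MOVN-based sequence needs
   no MOVK at all, whereas a MOVZ-based sequence would need three.  */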
2474 if (ncount < zcount)
2475 {
2476 emit_move_insn (gen_rtx_REG (Pmode, regnum),
2477 GEN_INT (val | ~(HOST_WIDE_INT) 0xffff));
2478 tval = 0xffff;
2479 }
2480 else
2481 {
2482 emit_move_insn (gen_rtx_REG (Pmode, regnum),
2483 GEN_INT (val & 0xffff));
2484 tval = 0;
2485 }
2486
2487 val >>= 16;
2488
2489 for (i = 16; i < 64; i += 16)
2490 {
2491 if ((val & 0xffff) != tval)
2492 emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum),
2493 GEN_INT (i), GEN_INT (val & 0xffff)));
2494 val >>= 16;
2495 }
2496 }
2497 }
2498
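/* Add DELTA to the hard register numbered REGNUM. The hard register
   numbered SCRATCHREG is used as a scratch: for very large deltas the
   constant is built there first, while moderately large deltas are
   split into a shifted 12-bit part and a 12-bit remainder.  */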
2499 static void
2500 aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta)
2501 {
2502 HOST_WIDE_INT mdelta = delta;
2503 rtx this_rtx = gen_rtx_REG (Pmode, regnum);
2504 rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg);
2505
2506 if (mdelta < 0)
2507 mdelta = -mdelta;
2508
2509 if (mdelta >= 4096 * 4096)
2510 {
2511 aarch64_build_constant (scratchreg, delta);
2512 emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx));
2513 }
2514 else if (mdelta > 0)
2515 {
2516 if (mdelta >= 4096)
2517 {
2518 emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096)));
2519 rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12));
2520 if (delta < 0)
2521 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2522 gen_rtx_MINUS (Pmode, this_rtx, shift)));
2523 else
2524 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2525 gen_rtx_PLUS (Pmode, this_rtx, shift)));
2526 }
2527 if (mdelta % 4096 != 0)
2528 {
2529 scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096));
2530 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2531 gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx)));
2532 }
2533 }
2534 }
2535
2536 /* Output code to add DELTA to the first argument, and then jump
2537 to FUNCTION. Used for C++ multiple inheritance. */
2538 static void
2539 aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
2540 HOST_WIDE_INT delta,
2541 HOST_WIDE_INT vcall_offset,
2542 tree function)
2543 {
2544 /* The this pointer is always in x0. Note that this differs from
2545 ARM, where the this pointer may be bumped to r1 if r0 is required
2546 to return a pointer to an aggregate. On AArch64 a result value
2547 pointer will be in x8. */
2548 int this_regno = R0_REGNUM;
2549 rtx this_rtx, temp0, temp1, addr, insn, funexp;
2550
2551 reload_completed = 1;
2552 emit_note (NOTE_INSN_PROLOGUE_END);
2553
2554 if (vcall_offset == 0)
2555 aarch64_add_constant (this_regno, IP1_REGNUM, delta);
2556 else
2557 {
2558 gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
2559
2560 this_rtx = gen_rtx_REG (Pmode, this_regno);
2561 temp0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2562 temp1 = gen_rtx_REG (Pmode, IP1_REGNUM);
2563
2564 addr = this_rtx;
2565 if (delta != 0)
2566 {
2567 if (delta >= -256 && delta < 256)
2568 addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx,
2569 plus_constant (Pmode, this_rtx, delta));
2570 else
2571 aarch64_add_constant (this_regno, IP1_REGNUM, delta);
2572 }
2573
2574 if (Pmode == ptr_mode)
2575 aarch64_emit_move (temp0, gen_rtx_MEM (ptr_mode, addr));
2576 else
2577 aarch64_emit_move (temp0,
2578 gen_rtx_ZERO_EXTEND (Pmode,
2579 gen_rtx_MEM (ptr_mode, addr)));
2580
2581 if (vcall_offset >= -256 && vcall_offset < 4096 * POINTER_BYTES)
2582 addr = plus_constant (Pmode, temp0, vcall_offset);
2583 else
2584 {
2585 aarch64_build_constant (IP1_REGNUM, vcall_offset);
2586 addr = gen_rtx_PLUS (Pmode, temp0, temp1);
2587 }
2588
2589 if (Pmode == ptr_mode)
2590 aarch64_emit_move (temp1, gen_rtx_MEM (ptr_mode, addr));
2591 else
2592 aarch64_emit_move (temp1,
2593 gen_rtx_SIGN_EXTEND (Pmode,
2594 gen_rtx_MEM (ptr_mode, addr)));
2595
2596 emit_insn (gen_add2_insn (this_rtx, temp1));
2597 }
2598
2599 /* Generate a tail call to the target function. */
2600 if (!TREE_USED (function))
2601 {
2602 assemble_external (function);
2603 TREE_USED (function) = 1;
2604 }
2605 funexp = XEXP (DECL_RTL (function), 0);
2606 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
2607 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
2608 SIBLING_CALL_P (insn) = 1;
2609
2610 insn = get_insns ();
2611 shorten_branches (insn);
2612 final_start_function (insn, file, 1);
2613 final (insn, file, 1);
2614 final_end_function ();
2615
2616 /* Stop pretending to be a post-reload pass. */
2617 reload_completed = 0;
2618 }
2619
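/* Callback for for_each_rtx. Return nonzero if *X is a SYMBOL_REF that
   refers to a thread-local symbol; return -1 to stop the walk inside
   UNSPEC_TLS, whose operands are TLS offsets rather than real symbol
   references.  */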
2620 static int
2621 aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
2622 {
2623 if (GET_CODE (*x) == SYMBOL_REF)
2624 return SYMBOL_REF_TLS_MODEL (*x) != 0;
2625
2626 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2627 TLS offsets, not real symbol references. */
2628 if (GET_CODE (*x) == UNSPEC
2629 && XINT (*x, 1) == UNSPEC_TLS)
2630 return -1;
2631
2632 return 0;
2633 }
2634
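/* Return true if X contains a reference to a thread-local symbol.  */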
2635 static bool
2636 aarch64_tls_referenced_p (rtx x)
2637 {
2638 if (!TARGET_HAVE_TLS)
2639 return false;
2640
2641 return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
2642 }
2643
2644
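/* qsort/bsearch comparison function for the table of bitmask immediates
   built below.  */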
2645 static int
2646 aarch64_bitmasks_cmp (const void *i1, const void *i2)
2647 {
2648 const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
2649 const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
2650
2651 if (*imm1 < *imm2)
2652 return -1;
2653 if (*imm1 > *imm2)
2654 return +1;
2655 return 0;
2656 }
2657
2658
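/* Populate aarch64_bitmasks with every valid AArch64 bitmask immediate:
   for each element size E in {2, 4, ..., 64}, every rotation of every
   run of S consecutive set bits (0 < S < E), replicated across the
   64-bit word. The table is then sorted so it can be searched with
   bsearch.  */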
2659 static void
2660 aarch64_build_bitmask_table (void)
2661 {
2662 unsigned HOST_WIDE_INT mask, imm;
2663 unsigned int log_e, e, s, r;
2664 unsigned int nimms = 0;
2665
2666 for (log_e = 1; log_e <= 6; log_e++)
2667 {
2668 e = 1 << log_e;
2669 if (e == 64)
2670 mask = ~(HOST_WIDE_INT) 0;
2671 else
2672 mask = ((HOST_WIDE_INT) 1 << e) - 1;
2673 for (s = 1; s < e; s++)
2674 {
2675 for (r = 0; r < e; r++)
2676 {
2677 /* set s consecutive bits to 1 (s < 64) */
2678 imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
2679 /* rotate right by r */
2680 if (r != 0)
2681 imm = ((imm >> r) | (imm << (e - r))) & mask;
2682 /* replicate the constant depending on SIMD size */
2683 switch (log_e) {
2684 case 1: imm |= (imm << 2);
2685 case 2: imm |= (imm << 4);
2686 case 3: imm |= (imm << 8);
2687 case 4: imm |= (imm << 16);
2688 case 5: imm |= (imm << 32);
2689 case 6:
2690 break;
2691 default:
2692 gcc_unreachable ();
2693 }
2694 gcc_assert (nimms < AARCH64_NUM_BITMASKS);
2695 aarch64_bitmasks[nimms++] = imm;
2696 }
2697 }
2698 }
2699
2700 gcc_assert (nimms == AARCH64_NUM_BITMASKS);
2701 qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
2702 aarch64_bitmasks_cmp);
2703 }
2704
2705
2706 /* Return true if val can be encoded as a 12-bit unsigned immediate with
2707 a left shift of 0 or 12 bits. */
2708 bool
2709 aarch64_uimm12_shift (HOST_WIDE_INT val)
2710 {
2711 return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
2712 || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
2713 );
2714 }
2715
2716
2717 /* Return true if val is an immediate that can be loaded into a
2718 register by a MOVZ instruction. */
2719 static bool
2720 aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
2721 {
2722 if (GET_MODE_SIZE (mode) > 4)
2723 {
2724 if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
2725 || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
2726 return 1;
2727 }
2728 else
2729 {
2730 /* Ignore sign extension. */
2731 val &= (HOST_WIDE_INT) 0xffffffff;
2732 }
2733 return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
2734 || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
2735 }
2736
2737
2738 /* Return true if val is a valid bitmask immediate. */
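/* (For instance, 0x00ff00ff00ff00ff -- eight consecutive set bits
   replicated at element size 16 -- is a bitmask immediate, whereas
   0x1234 is not, since its set bits do not form a single rotated
   run.)  */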
2739 bool
2740 aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
2741 {
2742 if (GET_MODE_SIZE (mode) < 8)
2743 {
2744 /* Replicate bit pattern. */
2745 val &= (HOST_WIDE_INT) 0xffffffff;
2746 val |= val << 32;
2747 }
2748 return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
2749 sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
2750 }
2751
2752
2753 /* Return true if val is an immediate that can be loaded into a
2754 register in a single instruction. */
2755 bool
2756 aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
2757 {
2758 if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
2759 return 1;
2760 return aarch64_bitmask_imm (val, mode);
2761 }
2762
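/* Implement TARGET_CANNOT_FORCE_CONST_MEM. Return true if X should not
   be pushed into the constant pool.  */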
2763 static bool
2764 aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2765 {
2766 rtx base, offset;
2767
2768 if (GET_CODE (x) == HIGH)
2769 return true;
2770
2771 split_const (x, &base, &offset);
2772 if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
2773 {
2774 if (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR)
2775 != SYMBOL_FORCE_TO_MEM)
2776 return true;
2777 else
2778 /* Avoid generating a 64-bit relocation in ILP32; leave
2779 to aarch64_expand_mov_immediate to handle it properly. */
2780 return mode != ptr_mode;
2781 }
2782
2783 return aarch64_tls_referenced_p (x);
2784 }
2785
2786 /* Return true if register REGNO is a valid index register.
2787 STRICT_P is true if REG_OK_STRICT is in effect. */
2788
2789 bool
2790 aarch64_regno_ok_for_index_p (int regno, bool strict_p)
2791 {
2792 if (!HARD_REGISTER_NUM_P (regno))
2793 {
2794 if (!strict_p)
2795 return true;
2796
2797 if (!reg_renumber)
2798 return false;
2799
2800 regno = reg_renumber[regno];
2801 }
2802 return GP_REGNUM_P (regno);
2803 }
2804
2805 /* Return true if register REGNO is a valid base register.
2806 STRICT_P is true if REG_OK_STRICT is in effect. */
2807
2808 bool
2809 aarch64_regno_ok_for_base_p (int regno, bool strict_p)
2810 {
2811 if (!HARD_REGISTER_NUM_P (regno))
2812 {
2813 if (!strict_p)
2814 return true;
2815
2816 if (!reg_renumber)
2817 return false;
2818
2819 regno = reg_renumber[regno];
2820 }
2821
2822 /* The fake registers will be eliminated to either the stack or
2823 hard frame pointer, both of which are usually valid base registers.
2824 Reload deals with the cases where the eliminated form isn't valid. */
2825 return (GP_REGNUM_P (regno)
2826 || regno == SP_REGNUM
2827 || regno == FRAME_POINTER_REGNUM
2828 || regno == ARG_POINTER_REGNUM);
2829 }
2830
2831 /* Return true if X is a valid base register.
2832 STRICT_P is true if REG_OK_STRICT is in effect. */
2833
2834 static bool
2835 aarch64_base_register_rtx_p (rtx x, bool strict_p)
2836 {
2837 if (!strict_p && GET_CODE (x) == SUBREG)
2838 x = SUBREG_REG (x);
2839
2840 return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
2841 }
2842
2843 /* Return true if address offset is a valid index. If it is, fill in INFO
2844 appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
2845
2846 static bool
2847 aarch64_classify_index (struct aarch64_address_info *info, rtx x,
2848 enum machine_mode mode, bool strict_p)
2849 {
2850 enum aarch64_address_type type;
2851 rtx index;
2852 int shift;
2853
2854 /* (reg:P) */
2855 if ((REG_P (x) || GET_CODE (x) == SUBREG)
2856 && GET_MODE (x) == Pmode)
2857 {
2858 type = ADDRESS_REG_REG;
2859 index = x;
2860 shift = 0;
2861 }
2862 /* (sign_extend:DI (reg:SI)) */
2863 else if ((GET_CODE (x) == SIGN_EXTEND
2864 || GET_CODE (x) == ZERO_EXTEND)
2865 && GET_MODE (x) == DImode
2866 && GET_MODE (XEXP (x, 0)) == SImode)
2867 {
2868 type = (GET_CODE (x) == SIGN_EXTEND)
2869 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
2870 index = XEXP (x, 0);
2871 shift = 0;
2872 }
2873 /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
2874 else if (GET_CODE (x) == MULT
2875 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
2876 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
2877 && GET_MODE (XEXP (x, 0)) == DImode
2878 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
2879 && CONST_INT_P (XEXP (x, 1)))
2880 {
2881 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2882 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
2883 index = XEXP (XEXP (x, 0), 0);
2884 shift = exact_log2 (INTVAL (XEXP (x, 1)));
2885 }
2886 /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
2887 else if (GET_CODE (x) == ASHIFT
2888 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
2889 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
2890 && GET_MODE (XEXP (x, 0)) == DImode
2891 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
2892 && CONST_INT_P (XEXP (x, 1)))
2893 {
2894 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2895 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
2896 index = XEXP (XEXP (x, 0), 0);
2897 shift = INTVAL (XEXP (x, 1));
2898 }
2899 /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
2900 else if ((GET_CODE (x) == SIGN_EXTRACT
2901 || GET_CODE (x) == ZERO_EXTRACT)
2902 && GET_MODE (x) == DImode
2903 && GET_CODE (XEXP (x, 0)) == MULT
2904 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
2905 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
2906 {
2907 type = (GET_CODE (x) == SIGN_EXTRACT)
2908 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
2909 index = XEXP (XEXP (x, 0), 0);
2910 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
2911 if (INTVAL (XEXP (x, 1)) != 32 + shift
2912 || INTVAL (XEXP (x, 2)) != 0)
2913 shift = -1;
2914 }
2915 /* (and:DI (mult:DI (reg:DI) (const_int scale))
2916 (const_int 0xffffffff<<shift)) */
2917 else if (GET_CODE (x) == AND
2918 && GET_MODE (x) == DImode
2919 && GET_CODE (XEXP (x, 0)) == MULT
2920 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
2921 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
2922 && CONST_INT_P (XEXP (x, 1)))
2923 {
2924 type = ADDRESS_REG_UXTW;
2925 index = XEXP (XEXP (x, 0), 0);
2926 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
2927 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
2928 shift = -1;
2929 }
2930 /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
2931 else if ((GET_CODE (x) == SIGN_EXTRACT
2932 || GET_CODE (x) == ZERO_EXTRACT)
2933 && GET_MODE (x) == DImode
2934 && GET_CODE (XEXP (x, 0)) == ASHIFT
2935 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
2936 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
2937 {
2938 type = (GET_CODE (x) == SIGN_EXTRACT)
2939 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
2940 index = XEXP (XEXP (x, 0), 0);
2941 shift = INTVAL (XEXP (XEXP (x, 0), 1));
2942 if (INTVAL (XEXP (x, 1)) != 32 + shift
2943 || INTVAL (XEXP (x, 2)) != 0)
2944 shift = -1;
2945 }
2946 /* (and:DI (ashift:DI (reg:DI) (const_int shift))
2947 (const_int 0xffffffff<<shift)) */
2948 else if (GET_CODE (x) == AND
2949 && GET_MODE (x) == DImode
2950 && GET_CODE (XEXP (x, 0)) == ASHIFT
2951 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
2952 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
2953 && CONST_INT_P (XEXP (x, 1)))
2954 {
2955 type = ADDRESS_REG_UXTW;
2956 index = XEXP (XEXP (x, 0), 0);
2957 shift = INTVAL (XEXP (XEXP (x, 0), 1));
2958 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
2959 shift = -1;
2960 }
2961 /* (mult:P (reg:P) (const_int scale)) */
2962 else if (GET_CODE (x) == MULT
2963 && GET_MODE (x) == Pmode
2964 && GET_MODE (XEXP (x, 0)) == Pmode
2965 && CONST_INT_P (XEXP (x, 1)))
2966 {
2967 type = ADDRESS_REG_REG;
2968 index = XEXP (x, 0);
2969 shift = exact_log2 (INTVAL (XEXP (x, 1)));
2970 }
2971 /* (ashift:P (reg:P) (const_int shift)) */
2972 else if (GET_CODE (x) == ASHIFT
2973 && GET_MODE (x) == Pmode
2974 && GET_MODE (XEXP (x, 0)) == Pmode
2975 && CONST_INT_P (XEXP (x, 1)))
2976 {
2977 type = ADDRESS_REG_REG;
2978 index = XEXP (x, 0);
2979 shift = INTVAL (XEXP (x, 1));
2980 }
2981 else
2982 return false;
2983
2984 if (GET_CODE (index) == SUBREG)
2985 index = SUBREG_REG (index);
2986
2987 if ((shift == 0
2988 || (shift > 0 && shift <= 3
2989 && (1 << shift) == GET_MODE_SIZE (mode)))
2990 && REG_P (index)
2991 && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
2992 {
2993 info->type = type;
2994 info->offset = index;
2995 info->shift = shift;
2996 return true;
2997 }
2998
2999 return false;
3000 }
3001
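/* Return true if OFFSET is within the signed 7-bit range used by the
   load/store pair instructions, scaled by the size of MODE, and is a
   multiple of that size.  */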
3002 static inline bool
3003 offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
3004 {
3005 return (offset >= -64 * GET_MODE_SIZE (mode)
3006 && offset < 64 * GET_MODE_SIZE (mode)
3007 && offset % GET_MODE_SIZE (mode) == 0);
3008 }
3009
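/* Return true if OFFSET is within the unscaled signed 9-bit range.  */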
3010 static inline bool
3011 offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
3012 HOST_WIDE_INT offset)
3013 {
3014 return offset >= -256 && offset < 256;
3015 }
3016
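/* Return true if OFFSET is a non-negative multiple of the size of MODE
   within the unsigned 12-bit scaled range.  */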
3017 static inline bool
3018 offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
3019 {
3020 return (offset >= 0
3021 && offset < 4096 * GET_MODE_SIZE (mode)
3022 && offset % GET_MODE_SIZE (mode) == 0);
3023 }
3024
3025 /* Return true if X is a valid address for machine mode MODE. If it is,
3026 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
3027 effect. OUTER_CODE is PARALLEL for a load/store pair. */
3028
3029 static bool
3030 aarch64_classify_address (struct aarch64_address_info *info,
3031 rtx x, enum machine_mode mode,
3032 RTX_CODE outer_code, bool strict_p)
3033 {
3034 enum rtx_code code = GET_CODE (x);
3035 rtx op0, op1;
3036 bool allow_reg_index_p =
3037 outer_code != PARALLEL && GET_MODE_SIZE (mode) != 16;
3038
3039 /* Don't support anything other than POST_INC or REG addressing for
3040 AdvSIMD. */
3041 if (aarch64_vector_mode_p (mode)
3042 && (code != POST_INC && code != REG))
3043 return false;
3044
3045 switch (code)
3046 {
3047 case REG:
3048 case SUBREG:
3049 info->type = ADDRESS_REG_IMM;
3050 info->base = x;
3051 info->offset = const0_rtx;
3052 return aarch64_base_register_rtx_p (x, strict_p);
3053
3054 case PLUS:
3055 op0 = XEXP (x, 0);
3056 op1 = XEXP (x, 1);
3057 if (GET_MODE_SIZE (mode) != 0
3058 && CONST_INT_P (op1)
3059 && aarch64_base_register_rtx_p (op0, strict_p))
3060 {
3061 HOST_WIDE_INT offset = INTVAL (op1);
3062
3063 info->type = ADDRESS_REG_IMM;
3064 info->base = op0;
3065 info->offset = op1;
3066
3067 /* TImode and TFmode values are allowed in both pairs of X
3068 registers and individual Q registers. The available
3069 address modes are:
3070 X,X: 7-bit signed scaled offset
3071 Q: 9-bit signed offset
3072 We conservatively require an offset representable in both modes.
3073 */
3074 if (mode == TImode || mode == TFmode)
3075 return (offset_7bit_signed_scaled_p (mode, offset)
3076 && offset_9bit_signed_unscaled_p (mode, offset));
3077
3078 if (outer_code == PARALLEL)
3079 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
3080 && offset_7bit_signed_scaled_p (mode, offset));
3081 else
3082 return (offset_9bit_signed_unscaled_p (mode, offset)
3083 || offset_12bit_unsigned_scaled_p (mode, offset));
3084 }
3085
3086 if (allow_reg_index_p)
3087 {
3088 /* Look for base + (scaled/extended) index register. */
3089 if (aarch64_base_register_rtx_p (op0, strict_p)
3090 && aarch64_classify_index (info, op1, mode, strict_p))
3091 {
3092 info->base = op0;
3093 return true;
3094 }
3095 if (aarch64_base_register_rtx_p (op1, strict_p)
3096 && aarch64_classify_index (info, op0, mode, strict_p))
3097 {
3098 info->base = op1;
3099 return true;
3100 }
3101 }
3102
3103 return false;
3104
3105 case POST_INC:
3106 case POST_DEC:
3107 case PRE_INC:
3108 case PRE_DEC:
3109 info->type = ADDRESS_REG_WB;
3110 info->base = XEXP (x, 0);
3111 info->offset = NULL_RTX;
3112 return aarch64_base_register_rtx_p (info->base, strict_p);
3113
3114 case POST_MODIFY:
3115 case PRE_MODIFY:
3116 info->type = ADDRESS_REG_WB;
3117 info->base = XEXP (x, 0);
3118 if (GET_CODE (XEXP (x, 1)) == PLUS
3119 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3120 && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
3121 && aarch64_base_register_rtx_p (info->base, strict_p))
3122 {
3123 HOST_WIDE_INT offset;
3124 info->offset = XEXP (XEXP (x, 1), 1);
3125 offset = INTVAL (info->offset);
3126
3127 /* TImode and TFmode values are allowed in both pairs of X
3128 registers and individual Q registers. The available
3129 address modes are:
3130 X,X: 7-bit signed scaled offset
3131 Q: 9-bit signed offset
3132 We conservatively require an offset representable in both modes.
3133 */
3134 if (mode == TImode || mode == TFmode)
3135 return (offset_7bit_signed_scaled_p (mode, offset)
3136 && offset_9bit_signed_unscaled_p (mode, offset));
3137
3138 if (outer_code == PARALLEL)
3139 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
3140 && offset_7bit_signed_scaled_p (mode, offset));
3141 else
3142 return offset_9bit_signed_unscaled_p (mode, offset);
3143 }
3144 return false;
3145
3146 case CONST:
3147 case SYMBOL_REF:
3148 case LABEL_REF:
3149 /* load literal: pc-relative constant pool entry. Only supported
3150 for SI mode or larger. */
3151 info->type = ADDRESS_SYMBOLIC;
3152 if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4)
3153 {
3154 rtx sym, addend;
3155
3156 split_const (x, &sym, &addend);
3157 return (GET_CODE (sym) == LABEL_REF
3158 || (GET_CODE (sym) == SYMBOL_REF
3159 && CONSTANT_POOL_ADDRESS_P (sym)));
3160 }
3161 return false;
3162
3163 case LO_SUM:
3164 info->type = ADDRESS_LO_SUM;
3165 info->base = XEXP (x, 0);
3166 info->offset = XEXP (x, 1);
3167 if (allow_reg_index_p
3168 && aarch64_base_register_rtx_p (info->base, strict_p))
3169 {
3170 rtx sym, offs;
3171 split_const (info->offset, &sym, &offs);
3172 if (GET_CODE (sym) == SYMBOL_REF
3173 && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
3174 == SYMBOL_SMALL_ABSOLUTE))
3175 {
3176 /* The symbol and offset must be aligned to the access size. */
3177 unsigned int align;
3178 unsigned int ref_size;
3179
3180 if (CONSTANT_POOL_ADDRESS_P (sym))
3181 align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
3182 else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
3183 {
3184 tree exp = SYMBOL_REF_DECL (sym);
3185 align = TYPE_ALIGN (TREE_TYPE (exp));
3186 align = CONSTANT_ALIGNMENT (exp, align);
3187 }
3188 else if (SYMBOL_REF_DECL (sym))
3189 align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
3190 else
3191 align = BITS_PER_UNIT;
3192
3193 ref_size = GET_MODE_SIZE (mode);
3194 if (ref_size == 0)
3195 ref_size = GET_MODE_SIZE (DImode);
3196
3197 return ((INTVAL (offs) & (ref_size - 1)) == 0
3198 && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
3199 }
3200 }
3201 return false;
3202
3203 default:
3204 return false;
3205 }
3206 }
3207
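/* Return true if X is a symbolic address: a SYMBOL_REF or LABEL_REF,
   possibly with a constant offset.  */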
3208 bool
3209 aarch64_symbolic_address_p (rtx x)
3210 {
3211 rtx offset;
3212
3213 split_const (x, &x, &offset);
3214 return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
3215 }
3216
3217 /* Classify the base of symbolic expression X, given that X appears in
3218 context CONTEXT. */
3219
3220 enum aarch64_symbol_type
3221 aarch64_classify_symbolic_expression (rtx x,
3222 enum aarch64_symbol_context context)
3223 {
3224 rtx offset;
3225
3226 split_const (x, &x, &offset);
3227 return aarch64_classify_symbol (x, context);
3228 }
3229
3230
3231 /* Return TRUE if X is a legitimate address for accessing memory in
3232 mode MODE. */
3233 static bool
3234 aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
3235 {
3236 struct aarch64_address_info addr;
3237
3238 return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
3239 }
3240
3241 /* Return TRUE if X is a legitimate address for accessing memory in
3242 mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
3243 pair operation. */
3244 bool
3245 aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
3246 RTX_CODE outer_code, bool strict_p)
3247 {
3248 struct aarch64_address_info addr;
3249
3250 return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
3251 }
3252
3253 /* Return TRUE if rtx X is the immediate constant 0.0. */
3254 bool
3255 aarch64_float_const_zero_rtx_p (rtx x)
3256 {
3257 REAL_VALUE_TYPE r;
3258
3259 if (GET_MODE (x) == VOIDmode)
3260 return false;
3261
3262 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3263 if (REAL_VALUE_MINUS_ZERO (r))
3264 return !HONOR_SIGNED_ZEROS (GET_MODE (x));
3265 return REAL_VALUES_EQUAL (r, dconst0);
3266 }
3267
3268 /* Return the fixed registers used for condition codes. */
3269
3270 static bool
3271 aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3272 {
3273 *p1 = CC_REGNUM;
3274 *p2 = INVALID_REGNUM;
3275 return true;
3276 }
3277
3278 enum machine_mode
3279 aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
3280 {
3281 /* Floating point comparisons LT, LE, GT and GE need a signalling
3282 compare and return CCFPEmode; all others return CCFPmode. */
3283 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3284 {
3285 switch (code)
3286 {
3287 case EQ:
3288 case NE:
3289 case UNORDERED:
3290 case ORDERED:
3291 case UNLT:
3292 case UNLE:
3293 case UNGT:
3294 case UNGE:
3295 case UNEQ:
3296 case LTGT:
3297 return CCFPmode;
3298
3299 case LT:
3300 case LE:
3301 case GT:
3302 case GE:
3303 return CCFPEmode;
3304
3305 default:
3306 gcc_unreachable ();
3307 }
3308 }
3309
3310 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
3311 && y == const0_rtx
3312 && (code == EQ || code == NE || code == LT || code == GE)
3313 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
3314 || GET_CODE (x) == NEG))
3315 return CC_NZmode;
3316
3317 /* A compare with a shifted, extended or negated operand. Because of
3318 canonicalization, the comparison will have to be swapped when we emit
3319 the assembly code. */
3320 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
3321 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
3322 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
3323 || GET_CODE (x) == LSHIFTRT
3324 || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND
3325 || GET_CODE (x) == NEG))
3326 return CC_SWPmode;
3327
3328 /* A compare of a mode narrower than SI mode against zero can be done
3329 by extending the value in the comparison. */
3330 if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
3331 && y == const0_rtx)
3332 /* Only use sign-extension if we really need it. */
3333 return ((code == GT || code == GE || code == LE || code == LT)
3334 ? CC_SESWPmode : CC_ZESWPmode);
3335
3336 /* For everything else, return CCmode. */
3337 return CCmode;
3338 }
3339
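/* Return the AArch64 condition code (an index into
   aarch64_condition_codes) that corresponds to comparison X, taking the
   CC mode of the comparison into account.  */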
3340 static unsigned
3341 aarch64_get_condition_code (rtx x)
3342 {
3343 enum machine_mode mode = GET_MODE (XEXP (x, 0));
3344 enum rtx_code comp_code = GET_CODE (x);
3345
3346 if (GET_MODE_CLASS (mode) != MODE_CC)
3347 mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
3348
3349 switch (mode)
3350 {
3351 case CCFPmode:
3352 case CCFPEmode:
3353 switch (comp_code)
3354 {
3355 case GE: return AARCH64_GE;
3356 case GT: return AARCH64_GT;
3357 case LE: return AARCH64_LS;
3358 case LT: return AARCH64_MI;
3359 case NE: return AARCH64_NE;
3360 case EQ: return AARCH64_EQ;
3361 case ORDERED: return AARCH64_VC;
3362 case UNORDERED: return AARCH64_VS;
3363 case UNLT: return AARCH64_LT;
3364 case UNLE: return AARCH64_LE;
3365 case UNGT: return AARCH64_HI;
3366 case UNGE: return AARCH64_PL;
3367 default: gcc_unreachable ();
3368 }
3369 break;
3370
3371 case CCmode:
3372 switch (comp_code)
3373 {
3374 case NE: return AARCH64_NE;
3375 case EQ: return AARCH64_EQ;
3376 case GE: return AARCH64_GE;
3377 case GT: return AARCH64_GT;
3378 case LE: return AARCH64_LE;
3379 case LT: return AARCH64_LT;
3380 case GEU: return AARCH64_CS;
3381 case GTU: return AARCH64_HI;
3382 case LEU: return AARCH64_LS;
3383 case LTU: return AARCH64_CC;
3384 default: gcc_unreachable ();
3385 }
3386 break;
3387
3388 case CC_SWPmode:
3389 case CC_ZESWPmode:
3390 case CC_SESWPmode:
3391 switch (comp_code)
3392 {
3393 case NE: return AARCH64_NE;
3394 case EQ: return AARCH64_EQ;
3395 case GE: return AARCH64_LE;
3396 case GT: return AARCH64_LT;
3397 case LE: return AARCH64_GE;
3398 case LT: return AARCH64_GT;
3399 case GEU: return AARCH64_LS;
3400 case GTU: return AARCH64_CC;
3401 case LEU: return AARCH64_CS;
3402 case LTU: return AARCH64_HI;
3403 default: gcc_unreachable ();
3404 }
3405 break;
3406
3407 case CC_NZmode:
3408 switch (comp_code)
3409 {
3410 case NE: return AARCH64_NE;
3411 case EQ: return AARCH64_EQ;
3412 case GE: return AARCH64_PL;
3413 case LT: return AARCH64_MI;
3414 default: gcc_unreachable ();
3415 }
3416 break;
3417
3418 default:
3419 gcc_unreachable ();
3420 break;
3421 }
3422 }
3423
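/* Return the number of set bits in VALUE.  */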
3424 static unsigned
3425 bit_count (unsigned HOST_WIDE_INT value)
3426 {
3427 unsigned count = 0;
3428
3429 while (value)
3430 {
3431 count++;
3432 value &= value - 1;
3433 }
3434
3435 return count;
3436 }
3437
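/* Print operand X to file F, using the modifier letter CODE to select
   one of the formats handled below.  */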
3438 void
3439 aarch64_print_operand (FILE *f, rtx x, char code)
3440 {
3441 switch (code)
3442 {
3443 /* An integer or symbol address without a preceding # sign. */
3444 case 'c':
3445 switch (GET_CODE (x))
3446 {
3447 case CONST_INT:
3448 fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
3449 break;
3450
3451 case SYMBOL_REF:
3452 output_addr_const (f, x);
3453 break;
3454
3455 case CONST:
3456 if (GET_CODE (XEXP (x, 0)) == PLUS
3457 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
3458 {
3459 output_addr_const (f, x);
3460 break;
3461 }
3462 /* Fall through. */
3463
3464 default:
3465 output_operand_lossage ("Unsupported operand for code '%c'", code);
3466 }
3467 break;
3468
3469 case 'e':
3470 /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
3471 {
3472 int n;
3473
3474 if (GET_CODE (x) != CONST_INT
3475 || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
3476 {
3477 output_operand_lossage ("invalid operand for '%%%c'", code);
3478 return;
3479 }
3480
3481 switch (n)
3482 {
3483 case 3:
3484 fputc ('b', f);
3485 break;
3486 case 4:
3487 fputc ('h', f);
3488 break;
3489 case 5:
3490 fputc ('w', f);
3491 break;
3492 default:
3493 output_operand_lossage ("invalid operand for '%%%c'", code);
3494 return;
3495 }
3496 }
3497 break;
3498
3499 case 'p':
3500 {
3501 int n;
3502
3503 /* Print N such that 2^N == X. */
3504 if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0)
3505 {
3506 output_operand_lossage ("invalid operand for '%%%c'", code);
3507 return;
3508 }
3509
3510 asm_fprintf (f, "%d", n);
3511 }
3512 break;
3513
3514 case 'P':
3515 /* Print the number of non-zero bits in X (a const_int). */
3516 if (GET_CODE (x) != CONST_INT)
3517 {
3518 output_operand_lossage ("invalid operand for '%%%c'", code);
3519 return;
3520 }
3521
3522 asm_fprintf (f, "%u", bit_count (INTVAL (x)));
3523 break;
3524
3525 case 'H':
3526 /* Print the higher numbered register of a pair (TImode) of regs. */
3527 if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
3528 {
3529 output_operand_lossage ("invalid operand for '%%%c'", code);
3530 return;
3531 }
3532
3533 asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]);
3534 break;
3535
3536 case 'm':
3537 /* Print a condition (eq, ne, etc). */
3538
3539 /* CONST_TRUE_RTX means always -- that's the default. */
3540 if (x == const_true_rtx)
3541 return;
3542
3543 if (!COMPARISON_P (x))
3544 {
3545 output_operand_lossage ("invalid operand for '%%%c'", code);
3546 return;
3547 }
3548
3549 fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
3550 break;
3551
3552 case 'M':
3553 /* Print the inverse of a condition (eq <-> ne, etc). */
3554
3555 /* CONST_TRUE_RTX means never -- that's the default. */
3556 if (x == const_true_rtx)
3557 {
3558 fputs ("nv", f);
3559 return;
3560 }
3561
3562 if (!COMPARISON_P (x))
3563 {
3564 output_operand_lossage ("invalid operand for '%%%c'", code);
3565 return;
3566 }
3567
3568 fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
3569 (aarch64_get_condition_code (x))], f);
3570 break;
3571
3572 case 'b':
3573 case 'h':
3574 case 's':
3575 case 'd':
3576 case 'q':
3577 /* Print a scalar FP/SIMD register name. */
3578 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
3579 {
3580 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
3581 return;
3582 }
3583 asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM);
3584 break;
3585
3586 case 'S':
3587 case 'T':
3588 case 'U':
3589 case 'V':
3590 /* Print the first FP/SIMD register name in a list. */
3591 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
3592 {
3593 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
3594 return;
3595 }
3596 asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S'));
3597 break;
3598
3599 case 'X':
3600 /* Print bottom 16 bits of integer constant in hex. */
3601 if (GET_CODE (x) != CONST_INT)
3602 {
3603 output_operand_lossage ("invalid operand for '%%%c'", code);
3604 return;
3605 }
3606 asm_fprintf (f, "0x%wx", UINTVAL (x) & 0xffff);
3607 break;
3608
3609 case 'w':
3610 case 'x':
3611 /* Print a general register name or the zero register (32-bit or
3612 64-bit). */
3613 if (x == const0_rtx
3614 || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x)))
3615 {
3616 asm_fprintf (f, "%czr", code);
3617 break;
3618 }
3619
3620 if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
3621 {
3622 asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM);
3623 break;
3624 }
3625
3626 if (REG_P (x) && REGNO (x) == SP_REGNUM)
3627 {
3628 asm_fprintf (f, "%ssp", code == 'w' ? "w" : "");
3629 break;
3630 }
3631
3632 /* Fall through */
3633
3634 case 0:
3635 /* Print a normal operand. If it's a general register, then we
3636 assume DImode. */
3637 if (x == NULL)
3638 {
3639 output_operand_lossage ("missing operand");
3640 return;
3641 }
3642
3643 switch (GET_CODE (x))
3644 {
3645 case REG:
3646 asm_fprintf (f, "%s", reg_names [REGNO (x)]);
3647 break;
3648
3649 case MEM:
3650 aarch64_memory_reference_mode = GET_MODE (x);
3651 output_address (XEXP (x, 0));
3652 break;
3653
3654 case LABEL_REF:
3655 case SYMBOL_REF:
3656 output_addr_const (asm_out_file, x);
3657 break;
3658
3659 case CONST_INT:
3660 asm_fprintf (f, "%wd", INTVAL (x));
3661 break;
3662
3663 case CONST_VECTOR:
3664 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT)
3665 {
3666 gcc_assert (aarch64_const_vec_all_same_int_p (x,
3667 HOST_WIDE_INT_MIN,
3668 HOST_WIDE_INT_MAX));
3669 asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
3670 }
3671 else if (aarch64_simd_imm_zero_p (x, GET_MODE (x)))
3672 {
3673 fputc ('0', f);
3674 }
3675 else
3676 gcc_unreachable ();
3677 break;
3678
3679 case CONST_DOUBLE:
3680 /* CONST_DOUBLE can represent a double-width integer.
3681 In this case, the mode of x is VOIDmode. */
3682 if (GET_MODE (x) == VOIDmode)
3683 ; /* Do Nothing. */
3684 else if (aarch64_float_const_zero_rtx_p (x))
3685 {
3686 fputc ('0', f);
3687 break;
3688 }
3689 else if (aarch64_float_const_representable_p (x))
3690 {
3691 #define buf_size 20
3692 char float_buf[buf_size] = {'\0'};
3693 REAL_VALUE_TYPE r;
3694 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3695 real_to_decimal_for_mode (float_buf, &r,
3696 buf_size, buf_size,
3697 1, GET_MODE (x));
3698 asm_fprintf (asm_out_file, "%s", float_buf);
3699 break;
3700 #undef buf_size
3701 }
3702 output_operand_lossage ("invalid constant");
3703 return;
3704 default:
3705 output_operand_lossage ("invalid operand");
3706 return;
3707 }
3708 break;
3709
3710 case 'A':
3711 if (GET_CODE (x) == HIGH)
3712 x = XEXP (x, 0);
3713
3714 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
3715 {
3716 case SYMBOL_SMALL_GOT:
3717 asm_fprintf (asm_out_file, ":got:");
3718 break;
3719
3720 case SYMBOL_SMALL_TLSGD:
3721 asm_fprintf (asm_out_file, ":tlsgd:");
3722 break;
3723
3724 case SYMBOL_SMALL_TLSDESC:
3725 asm_fprintf (asm_out_file, ":tlsdesc:");
3726 break;
3727
3728 case SYMBOL_SMALL_GOTTPREL:
3729 asm_fprintf (asm_out_file, ":gottprel:");
3730 break;
3731
3732 case SYMBOL_SMALL_TPREL:
3733 asm_fprintf (asm_out_file, ":tprel:");
3734 break;
3735
3736 case SYMBOL_TINY_GOT:
3737 gcc_unreachable ();
3738 break;
3739
3740 default:
3741 break;
3742 }
3743 output_addr_const (asm_out_file, x);
3744 break;
3745
3746 case 'L':
3747 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
3748 {
3749 case SYMBOL_SMALL_GOT:
3750 asm_fprintf (asm_out_file, ":lo12:");
3751 break;
3752
3753 case SYMBOL_SMALL_TLSGD:
3754 asm_fprintf (asm_out_file, ":tlsgd_lo12:");
3755 break;
3756
3757 case SYMBOL_SMALL_TLSDESC:
3758 asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
3759 break;
3760
3761 case SYMBOL_SMALL_GOTTPREL:
3762 asm_fprintf (asm_out_file, ":gottprel_lo12:");
3763 break;
3764
3765 case SYMBOL_SMALL_TPREL:
3766 asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
3767 break;
3768
3769 case SYMBOL_TINY_GOT:
3770 asm_fprintf (asm_out_file, ":got:");
3771 break;
3772
3773 default:
3774 break;
3775 }
3776 output_addr_const (asm_out_file, x);
3777 break;
3778
3779 case 'G':
3780
3781 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
3782 {
3783 case SYMBOL_SMALL_TPREL:
3784 asm_fprintf (asm_out_file, ":tprel_hi12:");
3785 break;
3786 default:
3787 break;
3788 }
3789 output_addr_const (asm_out_file, x);
3790 break;
3791
3792 default:
3793 output_operand_lossage ("invalid operand prefix '%%%c'", code);
3794 return;
3795 }
3796 }
3797
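/* Print address expression X to file F in AArch64 assembler syntax,
   based on the address classification computed above.  */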
3798 void
3799 aarch64_print_operand_address (FILE *f, rtx x)
3800 {
3801 struct aarch64_address_info addr;
3802
3803 if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
3804 MEM, true))
3805 switch (addr.type)
3806 {
3807 case ADDRESS_REG_IMM:
3808 if (addr.offset == const0_rtx)
3809 asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]);
3810 else
3811 asm_fprintf (f, "[%s,%wd]", reg_names [REGNO (addr.base)],
3812 INTVAL (addr.offset));
3813 return;
3814
3815 case ADDRESS_REG_REG:
3816 if (addr.shift == 0)
3817 asm_fprintf (f, "[%s,%s]", reg_names [REGNO (addr.base)],
3818 reg_names [REGNO (addr.offset)]);
3819 else
3820 asm_fprintf (f, "[%s,%s,lsl %u]", reg_names [REGNO (addr.base)],
3821 reg_names [REGNO (addr.offset)], addr.shift);
3822 return;
3823
3824 case ADDRESS_REG_UXTW:
3825 if (addr.shift == 0)
3826 asm_fprintf (f, "[%s,w%d,uxtw]", reg_names [REGNO (addr.base)],
3827 REGNO (addr.offset) - R0_REGNUM);
3828 else
3829 asm_fprintf (f, "[%s,w%d,uxtw %u]", reg_names [REGNO (addr.base)],
3830 REGNO (addr.offset) - R0_REGNUM, addr.shift);
3831 return;
3832
3833 case ADDRESS_REG_SXTW:
3834 if (addr.shift == 0)
3835 asm_fprintf (f, "[%s,w%d,sxtw]", reg_names [REGNO (addr.base)],
3836 REGNO (addr.offset) - R0_REGNUM);
3837 else
3838 asm_fprintf (f, "[%s,w%d,sxtw %u]", reg_names [REGNO (addr.base)],
3839 REGNO (addr.offset) - R0_REGNUM, addr.shift);
3840 return;
3841
3842 case ADDRESS_REG_WB:
3843 switch (GET_CODE (x))
3844 {
3845 case PRE_INC:
3846 asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)],
3847 GET_MODE_SIZE (aarch64_memory_reference_mode));
3848 return;
3849 case POST_INC:
3850 asm_fprintf (f, "[%s],%d", reg_names [REGNO (addr.base)],
3851 GET_MODE_SIZE (aarch64_memory_reference_mode));
3852 return;
3853 case PRE_DEC:
3854 asm_fprintf (f, "[%s,-%d]!", reg_names [REGNO (addr.base)],
3855 GET_MODE_SIZE (aarch64_memory_reference_mode));
3856 return;
3857 case POST_DEC:
3858 asm_fprintf (f, "[%s],-%d", reg_names [REGNO (addr.base)],
3859 GET_MODE_SIZE (aarch64_memory_reference_mode));
3860 return;
3861 case PRE_MODIFY:
3862 asm_fprintf (f, "[%s,%wd]!", reg_names [REGNO (addr.base)],
3863 INTVAL (addr.offset));
3864 return;
3865 case POST_MODIFY:
3866 asm_fprintf (f, "[%s],%wd", reg_names [REGNO (addr.base)],
3867 INTVAL (addr.offset));
3868 return;
3869 default:
3870 break;
3871 }
3872 break;
3873
3874 case ADDRESS_LO_SUM:
3875 asm_fprintf (f, "[%s,#:lo12:", reg_names [REGNO (addr.base)]);
3876 output_addr_const (f, addr.offset);
3877 asm_fprintf (f, "]");
3878 return;
3879
3880 case ADDRESS_SYMBOLIC:
3881 break;
3882 }
3883
3884 output_addr_const (f, x);
3885 }
3886
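/* Return true if X (or any sub-rtx of X) mentions a label, ignoring the
   label references inside UNSPEC_TLS.  */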
3887 bool
3888 aarch64_label_mentioned_p (rtx x)
3889 {
3890 const char *fmt;
3891 int i;
3892
3893 if (GET_CODE (x) == LABEL_REF)
3894 return true;
3895
3896 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
3897 referencing instruction, but they are constant offsets, not
3898 symbols. */
3899 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
3900 return false;
3901
3902 fmt = GET_RTX_FORMAT (GET_CODE (x));
3903 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3904 {
3905 if (fmt[i] == 'E')
3906 {
3907 int j;
3908
3909 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3910 if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
3911 return 1;
3912 }
3913 else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
3914 return 1;
3915 }
3916
3917 return 0;
3918 }
3919
3920 /* Implement REGNO_REG_CLASS. */
3921
3922 enum reg_class
3923 aarch64_regno_regclass (unsigned regno)
3924 {
3925 if (GP_REGNUM_P (regno))
3926 return CORE_REGS;
3927
3928 if (regno == SP_REGNUM)
3929 return STACK_REG;
3930
3931 if (regno == FRAME_POINTER_REGNUM
3932 || regno == ARG_POINTER_REGNUM)
3933 return POINTER_REGS;
3934
3935 if (FP_REGNUM_P (regno))
3936 return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
3937
3938 return NO_REGS;
3939 }
3940
3941 /* Try a machine-dependent way of reloading an illegitimate address
3942 operand. If we find one, push the reload and return the new rtx. */
3943
3944 rtx
3945 aarch64_legitimize_reload_address (rtx *x_p,
3946 enum machine_mode mode,
3947 int opnum, int type,
3948 int ind_levels ATTRIBUTE_UNUSED)
3949 {
3950 rtx x = *x_p;
3951
3952 /* Do not allow mem (plus (reg, const)) if vector mode. */
3953 if (aarch64_vector_mode_p (mode)
3954 && GET_CODE (x) == PLUS
3955 && REG_P (XEXP (x, 0))
3956 && CONST_INT_P (XEXP (x, 1)))
3957 {
3958 rtx orig_rtx = x;
3959 x = copy_rtx (x);
3960 push_reload (orig_rtx, NULL_RTX, x_p, NULL,
3961 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3962 opnum, (enum reload_type) type);
3963 return x;
3964 }
3965
3966 /* We must recognize output that we have already generated ourselves. */
3967 if (GET_CODE (x) == PLUS
3968 && GET_CODE (XEXP (x, 0)) == PLUS
3969 && REG_P (XEXP (XEXP (x, 0), 0))
3970 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3971 && CONST_INT_P (XEXP (x, 1)))
3972 {
3973 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3974 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3975 opnum, (enum reload_type) type);
3976 return x;
3977 }
3978
3979 /* We wish to handle large displacements off a base register by splitting
3980 the addend across an add and the mem insn. This can cut the number of
3981 extra insns needed from 3 to 1. It is only useful for load/store of a
3982 single register with 12 bit offset field. */
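/* As a sketch of the split below: for a DImode access at base + 0x10008,
   LOW becomes 0x8 and HIGH becomes 0x10000; HIGH is a valid shifted
   12-bit immediate, so a single ADD of 0x10000 is reloaded into a
   scratch base register while the memory access keeps offset 8.  */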
3983 if (GET_CODE (x) == PLUS
3984 && REG_P (XEXP (x, 0))
3985 && CONST_INT_P (XEXP (x, 1))
3986 && HARD_REGISTER_P (XEXP (x, 0))
3987 && mode != TImode
3988 && mode != TFmode
3989 && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
3990 {
3991 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
3992 HOST_WIDE_INT low = val & 0xfff;
3993 HOST_WIDE_INT high = val - low;
3994 HOST_WIDE_INT offs;
3995 rtx cst;
3996 enum machine_mode xmode = GET_MODE (x);
3997
3998 /* In ILP32, xmode can be either DImode or SImode. */
3999 gcc_assert (xmode == DImode || xmode == SImode);
4000
4001 /* Punt on BLKmode references, whose size is zero here: we cannot
4002 ascertain BLKmode alignment, so leave them to the generic reload code. */
4003 if (GET_MODE_SIZE (mode) == 0)
4004 return NULL_RTX;
4005
4006 offs = low % GET_MODE_SIZE (mode);
4007
4008 /* Align misaligned offset by adjusting high part to compensate. */
4009 if (offs != 0)
4010 {
4011 if (aarch64_uimm12_shift (high + offs))
4012 {
4013 /* Align down. */
4014 low = low - offs;
4015 high = high + offs;
4016 }
4017 else
4018 {
4019 /* Align up. */
4020 offs = GET_MODE_SIZE (mode) - offs;
4021 low = low + offs;
4022 high = high + (low & 0x1000) - offs;
4023 low &= 0xfff;
4024 }
4025 }
4026
4027 /* Check for overflow. */
4028 if (high + low != val)
4029 return NULL_RTX;
4030
4031 cst = GEN_INT (high);
4032 if (!aarch64_uimm12_shift (high))
4033 cst = force_const_mem (xmode, cst);
4034
4035 /* Reload high part into base reg, leaving the low part
4036 in the mem instruction. */
4037 x = gen_rtx_PLUS (xmode,
4038 gen_rtx_PLUS (xmode, XEXP (x, 0), cst),
4039 GEN_INT (low));
4040
4041 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4042 BASE_REG_CLASS, xmode, VOIDmode, 0, 0,
4043 opnum, (enum reload_type) type);
4044 return x;
4045 }
4046
4047 return NULL_RTX;
4048 }
4049
4050
4051 static reg_class_t
4052 aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
4053 reg_class_t rclass,
4054 enum machine_mode mode,
4055 secondary_reload_info *sri)
4056 {
4057 /* Without the TARGET_SIMD instructions we cannot move a Q register
4058 to a Q register directly. We need a scratch. */
4059 if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
4060 && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
4061 && reg_class_subset_p (rclass, FP_REGS))
4062 {
4063 if (mode == TFmode)
4064 sri->icode = CODE_FOR_aarch64_reload_movtf;
4065 else if (mode == TImode)
4066 sri->icode = CODE_FOR_aarch64_reload_movti;
4067 return NO_REGS;
4068 }
4069
4070 /* A TFmode or TImode memory access should be handled via FP_REGS,
4071 because AArch64 has richer addressing modes for LDR/STR instructions
4072 than for LDP/STP instructions. */
4073 if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS
4074 && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
4075 return FP_REGS;
4076
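/* A TImode or TFmode constant cannot be materialized directly in an
   FP register, so ask for a core-register intermediate instead.  */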
4077 if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
4078 return CORE_REGS;
4079
4080 return NO_REGS;
4081 }
4082
4083 static bool
4084 aarch64_can_eliminate (const int from, const int to)
4085 {
4086 /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
4087 HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
4088
4089 if (frame_pointer_needed)
4090 {
4091 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4092 return true;
4093 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
4094 return false;
4095 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
4096 && !cfun->calls_alloca)
4097 return true;
4098 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4099 return true;
4100 return false;
4101 }
4102 else
4103 {
4104 /* If we decided that we didn't need a leaf frame pointer but then used
4105 LR in the function, then we'll want a frame pointer after all, so
4106 prevent this elimination to ensure a frame pointer is used.
4107
4108 NOTE: the original value of flag_omit_frame_pointer gets trashed
4109 IFF flag_omit_leaf_frame_pointer is true, so we check the value
4110 of faked_omit_frame_pointer here (which is true when we always
4111 wish to keep non-leaf frame pointers but only wish to keep leaf frame
4112 pointers when LR is clobbered). */
4113 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
4114 && df_regs_ever_live_p (LR_REGNUM)
4115 && faked_omit_frame_pointer)
4116 return false;
4117 }
4118
4119 return true;
4120 }
4121
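/* Used to implement INITIAL_ELIMINATION_OFFSET: return the distance in
   bytes between eliminable register FROM and its replacement TO, as
   laid out by aarch64_layout_frame.  As a rough example, with 32 bytes
   of saved registers, 16 bytes of locals and no outgoing arguments the
   rounded frame size is 48, so ARG_POINTER -> STACK_POINTER yields 48
   while FRAME_POINTER -> HARD_FRAME_POINTER yields 32.  */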
4122 HOST_WIDE_INT
4123 aarch64_initial_elimination_offset (unsigned from, unsigned to)
4124 {
4125 HOST_WIDE_INT frame_size;
4126 HOST_WIDE_INT offset;
4127
4128 aarch64_layout_frame ();
4129 frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size
4130 + crtl->outgoing_args_size
4131 + cfun->machine->saved_varargs_size);
4132
4133 frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT);
4134 offset = frame_size;
4135
4136 if (to == HARD_FRAME_POINTER_REGNUM)
4137 {
4138 if (from == ARG_POINTER_REGNUM)
4139 return offset - crtl->outgoing_args_size;
4140
4141 if (from == FRAME_POINTER_REGNUM)
4142 return cfun->machine->frame.saved_regs_size;
4143 }
4144
4145 if (to == STACK_POINTER_REGNUM)
4146 {
4147 if (from == FRAME_POINTER_REGNUM)
4148 {
4149 HOST_WIDE_INT elim = crtl->outgoing_args_size
4150 + cfun->machine->frame.saved_regs_size
4151 - cfun->machine->frame.fp_lr_offset;
4152 elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT);
4153 return elim;
4154 }
4155 }
4156
4157 return offset;
4158 }
4159
4160
4161 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
4162 previous frame. */
4163
4164 rtx
4165 aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4166 {
4167 if (count != 0)
4168 return const0_rtx;
4169 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
4170 }
4171
4172
4173 static void
4174 aarch64_asm_trampoline_template (FILE *f)
4175 {
4176 if (TARGET_ILP32)
4177 {
4178 asm_fprintf (f, "\tldr\tw%d, .+16\n", IP1_REGNUM - R0_REGNUM);
4179 asm_fprintf (f, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM - R0_REGNUM);
4180 }
4181 else
4182 {
4183 asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]);
4184 asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]);
4185 }
4186 asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
4187 assemble_aligned_integer (4, const0_rtx);
4188 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
4189 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
4190 }
4191
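/* Set up the trampoline in M_TRAMP.  The template above emits 16 bytes
   of code (two PC-relative literal loads, a branch and a padding word);
   the code below then stores the target function address at offset 16
   and the static chain value right after it, which is where the two
   loads expect to find them.  */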
4192 static void
4193 aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
4194 {
4195 rtx fnaddr, mem, a_tramp;
4196 const int tramp_code_sz = 16;
4197
4198 /* Don't need to copy the trailing D-words, we fill those in below. */
4199 emit_block_move (m_tramp, assemble_trampoline_template (),
4200 GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
4201 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
4202 fnaddr = XEXP (DECL_RTL (fndecl), 0);
4203 if (GET_MODE (fnaddr) != ptr_mode)
4204 fnaddr = convert_memory_address (ptr_mode, fnaddr);
4205 emit_move_insn (mem, fnaddr);
4206
4207 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz + POINTER_BYTES);
4208 emit_move_insn (mem, chain_value);
4209
4210 /* XXX We should really define a "clear_cache" pattern and use
4211 gen_clear_cache(). */
4212 a_tramp = XEXP (m_tramp, 0);
4213 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
4214 LCT_NORMAL, VOIDmode, 2, a_tramp, ptr_mode,
4215 plus_constant (ptr_mode, a_tramp, TRAMPOLINE_SIZE),
4216 ptr_mode);
4217 }
4218
4219 static unsigned char
4220 aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
4221 {
4222 switch (regclass)
4223 {
4224 case CORE_REGS:
4225 case POINTER_REGS:
4226 case GENERAL_REGS:
4227 case ALL_REGS:
4228 case FP_REGS:
4229 case FP_LO_REGS:
4230 return
4231 aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 :
4232 (GET_MODE_SIZE (mode) + 7) / 8;
4233 case STACK_REG:
4234 return 1;
4235
4236 case NO_REGS:
4237 return 0;
4238
4239 default:
4240 break;
4241 }
4242 gcc_unreachable ();
4243 }
4244
4245 static reg_class_t
4246 aarch64_preferred_reload_class (rtx x, reg_class_t regclass)
4247 {
4248 if (regclass == POINTER_REGS)
4249 return GENERAL_REGS;
4250
4251 if (regclass == STACK_REG)
4252 {
4253 if (REG_P(x)
4254 && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x)), POINTER_REGS))
4255 return regclass;
4256
4257 return NO_REGS;
4258 }
4259
4260 /* If it's an integer immediate that MOVI can't handle, then
4261 FP_REGS is not an option, so we return NO_REGS instead. */
4262 if (CONST_INT_P (x) && reg_class_subset_p (regclass, FP_REGS)
4263 && !aarch64_simd_imm_scalar_p (x, GET_MODE (x)))
4264 return NO_REGS;
4265
4266 /* Register elimination can result in a request for
4267 SP+constant->FP_REGS. We cannot support such operations, which
4268 use SP as the source and an FP_REG as the destination, so reject
4269 them outright. */
4270 if (! reg_class_subset_p (regclass, GENERAL_REGS) && GET_CODE (x) == PLUS)
4271 {
4272 rtx lhs = XEXP (x, 0);
4273
4274 /* Look through a possible SUBREG introduced by ILP32. */
4275 if (GET_CODE (lhs) == SUBREG)
4276 lhs = SUBREG_REG (lhs);
4277
4278 gcc_assert (REG_P (lhs));
4279 gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs)),
4280 POINTER_REGS));
4281 return NO_REGS;
4282 }
4283
4284 return regclass;
4285 }
4286
4287 void
4288 aarch64_asm_output_labelref (FILE* f, const char *name)
4289 {
4290 asm_fprintf (f, "%U%s", name);
4291 }
4292
4293 static void
4294 aarch64_elf_asm_constructor (rtx symbol, int priority)
4295 {
4296 if (priority == DEFAULT_INIT_PRIORITY)
4297 default_ctor_section_asm_out_constructor (symbol, priority);
4298 else
4299 {
4300 section *s;
4301 char buf[18];
4302 snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
4303 s = get_section (buf, SECTION_WRITE, NULL);
4304 switch_to_section (s);
4305 assemble_align (POINTER_SIZE);
4306 assemble_aligned_integer (POINTER_BYTES, symbol);
4307 }
4308 }
4309
4310 static void
4311 aarch64_elf_asm_destructor (rtx symbol, int priority)
4312 {
4313 if (priority == DEFAULT_INIT_PRIORITY)
4314 default_dtor_section_asm_out_destructor (symbol, priority);
4315 else
4316 {
4317 section *s;
4318 char buf[18];
4319 snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
4320 s = get_section (buf, SECTION_WRITE, NULL);
4321 switch_to_section (s);
4322 assemble_align (POINTER_SIZE);
4323 assemble_aligned_integer (POINTER_BYTES, symbol);
4324 }
4325 }
4326
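/* Output the dispatch sequence for a casesi, using the ADDR_DIFF_VEC
   that follows operand 2 to pick the table element width.  For a
   one-byte table, with operands 0..4 shown here as x0, w1, x3/w3 and
   x4, the emitted sequence is roughly:

       ldrb    w3, [x0,w1,uxtw]
       adr     x4, .Lrtx<N>
       add     x3, x4, w3, sxtb #2
       br      x3
   .Lrtx<N>:  */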
4327 const char*
4328 aarch64_output_casesi (rtx *operands)
4329 {
4330 char buf[100];
4331 char label[100];
4332 rtx diff_vec = PATTERN (next_active_insn (operands[2]));
4333 int index;
4334 static const char *const patterns[4][2] =
4335 {
4336 {
4337 "ldrb\t%w3, [%0,%w1,uxtw]",
4338 "add\t%3, %4, %w3, sxtb #2"
4339 },
4340 {
4341 "ldrh\t%w3, [%0,%w1,uxtw #1]",
4342 "add\t%3, %4, %w3, sxth #2"
4343 },
4344 {
4345 "ldr\t%w3, [%0,%w1,uxtw #2]",
4346 "add\t%3, %4, %w3, sxtw #2"
4347 },
4348 /* We assume that DImode is only generated when not optimizing and
4349 that we don't really need 64-bit address offsets. That would
4350 imply an object file with 8GB of code in a single function! */
4351 {
4352 "ldr\t%w3, [%0,%w1,uxtw #2]",
4353 "add\t%3, %4, %w3, sxtw #2"
4354 }
4355 };
4356
4357 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
4358
4359 index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
4360
4361 gcc_assert (index >= 0 && index <= 3);
4362
4363 /* Need to implement table size reduction, by changing the code below. */
4364 output_asm_insn (patterns[index][0], operands);
4365 ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
4366 snprintf (buf, sizeof (buf),
4367 "adr\t%%4, %s", targetm.strip_name_encoding (label));
4368 output_asm_insn (buf, operands);
4369 output_asm_insn (patterns[index][1], operands);
4370 output_asm_insn ("br\t%3", operands);
4371 assemble_label (asm_out_file, label);
4372 return "";
4373 }
4374
4375
4376 /* Return size in bits of an arithmetic operand which is shifted/scaled and
4377 masked such that it is suitable for a UXTB, UXTH, or UXTW extend
4378 operator. */
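/* For example, a mask of 0x1fe with a shift of 1 matches a byte value
   widened and then shifted left by one, so 8 is returned (UXTB);
   anything that does not correspond to an 8-, 16- or 32-bit extend
   yields 0.  */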
4379
4380 int
4381 aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
4382 {
4383 if (shift >= 0 && shift <= 3)
4384 {
4385 int size;
4386 for (size = 8; size <= 32; size *= 2)
4387 {
4388 HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
4389 if (mask == bits << shift)
4390 return size;
4391 }
4392 }
4393 return 0;
4394 }
4395
4396 static bool
4397 aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
4398 const_rtx x ATTRIBUTE_UNUSED)
4399 {
4400 /* We can't use blocks for constants when we're using a per-function
4401 constant pool. */
4402 return false;
4403 }
4404
4405 static section *
4406 aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
4407 rtx x ATTRIBUTE_UNUSED,
4408 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
4409 {
4410 /* Force all constant pool entries into the current function section. */
4411 return function_section (current_function_decl);
4412 }
4413
4414
4415 /* Costs. */
4416
4417 /* Helper function for rtx cost calculation. Strip a shift expression
4418 from X. Returns the inner operand if successful, or the original
4419 expression on failure. */
4420 static rtx
4421 aarch64_strip_shift (rtx x)
4422 {
4423 rtx op = x;
4424
4425 if ((GET_CODE (op) == ASHIFT
4426 || GET_CODE (op) == ASHIFTRT
4427 || GET_CODE (op) == LSHIFTRT)
4428 && CONST_INT_P (XEXP (op, 1)))
4429 return XEXP (op, 0);
4430
4431 if (GET_CODE (op) == MULT
4432 && CONST_INT_P (XEXP (op, 1))
4433 && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
4434 return XEXP (op, 0);
4435
4436 return x;
4437 }
4438
4439 /* Helper function for rtx cost calculation. Strip a shift or extend
4440 expression from X. Returns the inner operand if successful, or the
4441 original expression on failure. We deal with a number of possible
4442 canonicalization variations here. */
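/* Roughly, the forms handled below are: a zero/sign extraction of a
   widened value, an AND of a scaled value with an extraction mask, an
   optional left shift by 1..4 over a zero/sign extend, and finally a
   plain shift or scale via aarch64_strip_shift.  */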
4443 static rtx
4444 aarch64_strip_shift_or_extend (rtx x)
4445 {
4446 rtx op = x;
4447
4448 /* Zero and sign extraction of a widened value. */
4449 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
4450 && XEXP (op, 2) == const0_rtx
4451 && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
4452 XEXP (op, 1)))
4453 return XEXP (XEXP (op, 0), 0);
4454
4455 /* It can also be represented (for zero-extend) as an AND with an
4456 immediate. */
4457 if (GET_CODE (op) == AND
4458 && GET_CODE (XEXP (op, 0)) == MULT
4459 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
4460 && CONST_INT_P (XEXP (op, 1))
4461 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
4462 INTVAL (XEXP (op, 1))) != 0)
4463 return XEXP (XEXP (op, 0), 0);
4464
4465 /* Now handle extended register, as this may also have an optional
4466 left shift by 1..4. */
4467 if (GET_CODE (op) == ASHIFT
4468 && CONST_INT_P (XEXP (op, 1))
4469 && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
4470 op = XEXP (op, 0);
4471
4472 if (GET_CODE (op) == ZERO_EXTEND
4473 || GET_CODE (op) == SIGN_EXTEND)
4474 op = XEXP (op, 0);
4475
4476 if (op != x)
4477 return op;
4478
4479 return aarch64_strip_shift (x);
4480 }
4481
4482 /* Calculate the cost of calculating X, storing it in *COST. Result
4483 is true if the total cost of the operation has now been calculated. */
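/* For example, a SET whose source is a MEM adds the tuning table's
   memory_load cost when optimizing for speed, while (plus (mult (reg)
   (reg)) (reg)) is recognized below as a multiply-accumulate and
   costed as int_multiply_add plus the costs of its operands.  */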
4484 static bool
4485 aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
4486 int param ATTRIBUTE_UNUSED, int *cost, bool speed)
4487 {
4488 rtx op0, op1;
4489 const struct cpu_rtx_cost_table *extra_cost
4490 = aarch64_tune_params->insn_extra_cost;
4491
4492 switch (code)
4493 {
4494 case SET:
4495 op0 = SET_DEST (x);
4496 op1 = SET_SRC (x);
4497
4498 switch (GET_CODE (op0))
4499 {
4500 case MEM:
4501 if (speed)
4502 *cost += extra_cost->memory_store;
4503
4504 if (op1 != const0_rtx)
4505 *cost += rtx_cost (op1, SET, 1, speed);
4506 return true;
4507
4508 case SUBREG:
4509 if (! REG_P (SUBREG_REG (op0)))
4510 *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
4511 /* Fall through. */
4512 case REG:
4513 /* Cost is just the cost of the RHS of the set. */
4514 *cost += rtx_cost (op1, SET, 1, true);
4515 return true;
4516
4517 case ZERO_EXTRACT: /* Bit-field insertion. */
4518 case SIGN_EXTRACT:
4519 /* Strip any redundant widening of the RHS to meet the width of
4520 the target. */
4521 if (GET_CODE (op1) == SUBREG)
4522 op1 = SUBREG_REG (op1);
4523 if ((GET_CODE (op1) == ZERO_EXTEND
4524 || GET_CODE (op1) == SIGN_EXTEND)
4525 && GET_CODE (XEXP (op0, 1)) == CONST_INT
4526 && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
4527 >= INTVAL (XEXP (op0, 1))))
4528 op1 = XEXP (op1, 0);
4529 *cost += rtx_cost (op1, SET, 1, speed);
4530 return true;
4531
4532 default:
4533 break;
4534 }
4535 return false;
4536
4537 case MEM:
4538 if (speed)
4539 *cost += extra_cost->memory_load;
4540
4541 return true;
4542
4543 case NEG:
4544 op0 = CONST0_RTX (GET_MODE (x));
4545 op1 = XEXP (x, 0);
4546 goto cost_minus;
4547
4548 case COMPARE:
4549 op0 = XEXP (x, 0);
4550 op1 = XEXP (x, 1);
4551
4552 if (op1 == const0_rtx
4553 && GET_CODE (op0) == AND)
4554 {
4555 x = op0;
4556 goto cost_logic;
4557 }
4558
4559 /* Comparisons can work if the order is swapped.
4560 Canonicalization puts the more complex operation first, but
4561 we want it in op1. */
4562 if (! (REG_P (op0)
4563 || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
4564 {
4565 op0 = XEXP (x, 1);
4566 op1 = XEXP (x, 0);
4567 }
4568 goto cost_minus;
4569
4570 case MINUS:
4571 op0 = XEXP (x, 0);
4572 op1 = XEXP (x, 1);
4573
4574 cost_minus:
4575 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4576 || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
4577 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
4578 {
4579 if (op0 != const0_rtx)
4580 *cost += rtx_cost (op0, MINUS, 0, speed);
4581
4582 if (CONST_INT_P (op1))
4583 {
4584 if (!aarch64_uimm12_shift (INTVAL (op1)))
4585 *cost += rtx_cost (op1, MINUS, 1, speed);
4586 }
4587 else
4588 {
4589 op1 = aarch64_strip_shift_or_extend (op1);
4590 *cost += rtx_cost (op1, MINUS, 1, speed);
4591 }
4592 return true;
4593 }
4594
4595 return false;
4596
4597 case PLUS:
4598 op0 = XEXP (x, 0);
4599 op1 = XEXP (x, 1);
4600
4601 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
4602 {
4603 if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1)))
4604 {
4605 *cost += rtx_cost (op0, PLUS, 0, speed);
4606 }
4607 else
4608 {
4609 rtx new_op0 = aarch64_strip_shift_or_extend (op0);
4610
4611 if (new_op0 == op0
4612 && GET_CODE (op0) == MULT)
4613 {
4614 if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND
4615 && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND)
4616 || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND
4617 && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND))
4618 {
4619 *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0,
4620 speed)
4621 + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1,
4622 speed)
4623 + rtx_cost (op1, PLUS, 1, speed));
4624 if (speed)
4625 *cost += extra_cost->int_multiply_extend_add;
4626 return true;
4627 }
4628 *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
4629 + rtx_cost (XEXP (op0, 1), MULT, 1, speed)
4630 + rtx_cost (op1, PLUS, 1, speed));
4631
4632 if (speed)
4633 *cost += extra_cost->int_multiply_add;
return true; /* OP0 and OP1 are fully costed; do not cost them again below. */
4634 }
4635
4636 *cost += (rtx_cost (new_op0, PLUS, 0, speed)
4637 + rtx_cost (op1, PLUS, 1, speed));
4638 }
4639 return true;
4640 }
4641
4642 return false;
4643
4644 case IOR:
4645 case XOR:
4646 case AND:
4647 cost_logic:
4648 op0 = XEXP (x, 0);
4649 op1 = XEXP (x, 1);
4650
4651 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
4652 {
4653 if (CONST_INT_P (op1)
4654 && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
4655 {
4656 *cost += rtx_cost (op0, AND, 0, speed);
4657 }
4658 else
4659 {
4660 if (GET_CODE (op0) == NOT)
4661 op0 = XEXP (op0, 0);
4662 op0 = aarch64_strip_shift (op0);
4663 *cost += (rtx_cost (op0, AND, 0, speed)
4664 + rtx_cost (op1, AND, 1, speed));
4665 }
4666 return true;
4667 }
4668 return false;
4669
4670 case ZERO_EXTEND:
4671 if ((GET_MODE (x) == DImode
4672 && GET_MODE (XEXP (x, 0)) == SImode)
4673 || GET_CODE (XEXP (x, 0)) == MEM)
4674 {
4675 *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
4676 return true;
4677 }
4678 return false;
4679
4680 case SIGN_EXTEND:
4681 if (GET_CODE (XEXP (x, 0)) == MEM)
4682 {
4683 *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed);
4684 return true;
4685 }
4686 return false;
4687
4688 case ROTATE:
4689 if (!CONST_INT_P (XEXP (x, 1)))
4690 *cost += COSTS_N_INSNS (2);
4691 /* Fall through. */
4692 case ROTATERT:
4693 case LSHIFTRT:
4694 case ASHIFT:
4695 case ASHIFTRT:
4696
4697 /* Shifting by a register often takes an extra cycle. */
4698 if (speed && !CONST_INT_P (XEXP (x, 1)))
4699 *cost += extra_cost->register_shift;
4700
4701 *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed);
4702 return true;
4703
4704 case HIGH:
4705 if (!CONSTANT_P (XEXP (x, 0)))
4706 *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed);
4707 return true;
4708
4709 case LO_SUM:
4710 if (!CONSTANT_P (XEXP (x, 1)))
4711 *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed);
4712 *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed);
4713 return true;
4714
4715 case ZERO_EXTRACT:
4716 case SIGN_EXTRACT:
4717 *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed);
4718 return true;
4719
4720 case MULT:
4721 op0 = XEXP (x, 0);
4722 op1 = XEXP (x, 1);
4723
4724 *cost = COSTS_N_INSNS (1);
4725 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
4726 {
4727 if (CONST_INT_P (op1)
4728 && exact_log2 (INTVAL (op1)) > 0)
4729 {
4730 *cost += rtx_cost (op0, ASHIFT, 0, speed);
4731 return true;
4732 }
4733
4734 if ((GET_CODE (op0) == ZERO_EXTEND
4735 && GET_CODE (op1) == ZERO_EXTEND)
4736 || (GET_CODE (op0) == SIGN_EXTEND
4737 && GET_CODE (op1) == SIGN_EXTEND))
4738 {
4739 *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
4740 + rtx_cost (XEXP (op1, 0), MULT, 1, speed));
4741 if (speed)
4742 *cost += extra_cost->int_multiply_extend;
4743 return true;
4744 }
4745
4746 if (speed)
4747 *cost += extra_cost->int_multiply;
4748 }
4749 else if (speed)
4750 {
4751 if (GET_MODE (x) == DFmode)
4752 *cost += extra_cost->double_multiply;
4753 else if (GET_MODE (x) == SFmode)
4754 *cost += extra_cost->float_multiply;
4755 }
4756
4757 return false; /* All arguments need to be in registers. */
4758
4759 case MOD:
4760 case UMOD:
4761 *cost = COSTS_N_INSNS (2);
4762 if (speed)
4763 {
4764 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
4765 *cost += (extra_cost->int_multiply_add
4766 + extra_cost->int_divide);
4767 else if (GET_MODE (x) == DFmode)
4768 *cost += (extra_cost->double_multiply
4769 + extra_cost->double_divide);
4770 else if (GET_MODE (x) == SFmode)
4771 *cost += (extra_cost->float_multiply
4772 + extra_cost->float_divide);
4773 }
4774 return false; /* All arguments need to be in registers. */
4775
4776 case DIV:
4777 case UDIV:
4778 *cost = COSTS_N_INSNS (1);
4779 if (speed)
4780 {
4781 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
4782 *cost += extra_cost->int_divide;
4783 else if (GET_MODE (x) == DFmode)
4784 *cost += extra_cost->double_divide;
4785 else if (GET_MODE (x) == SFmode)
4786 *cost += extra_cost->float_divide;
4787 }
4788 return false; /* All arguments need to be in registers. */
4789
4790 default:
4791 break;
4792 }
4793 return false;
4794 }
4795
4796 static int
4797 aarch64_address_cost (rtx x ATTRIBUTE_UNUSED,
4798 enum machine_mode mode ATTRIBUTE_UNUSED,
4799 addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
4800 {
4801 enum rtx_code c = GET_CODE (x);
4802 const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
4803
4804 if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
4805 return addr_cost->pre_modify;
4806
4807 if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
4808 return addr_cost->post_modify;
4809
4810 if (c == PLUS)
4811 {
4812 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4813 return addr_cost->imm_offset;
4814 else if (GET_CODE (XEXP (x, 0)) == MULT
4815 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4816 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
4817 return addr_cost->register_extend;
4818
4819 return addr_cost->register_offset;
4820 }
4821 else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4822 return addr_cost->imm_offset;
4823
4824 return 0;
4825 }
4826
4827 static int
4828 aarch64_register_move_cost (enum machine_mode mode,
4829 reg_class_t from, reg_class_t to)
4830 {
4831 const struct cpu_regmove_cost *regmove_cost
4832 = aarch64_tune_params->regmove_cost;
4833
4834 if (from == GENERAL_REGS && to == GENERAL_REGS)
4835 return regmove_cost->GP2GP;
4836 else if (from == GENERAL_REGS)
4837 return regmove_cost->GP2FP;
4838 else if (to == GENERAL_REGS)
4839 return regmove_cost->FP2GP;
4840
4841 /* When AdvSIMD instructions are disabled it is not possible to move
4842 a 128-bit value directly between Q registers. This is handled in
4843 secondary reload. A general register is used as a scratch to move
4844 the upper DI value and the lower DI value is moved directly,
4845 hence the cost is the sum of three moves. */
4846
4847 if (! TARGET_SIMD && GET_MODE_SIZE (mode) == 16)
4848 return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
4849
4850 return regmove_cost->FP2FP;
4851 }
4852
4853 static int
4854 aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
4855 reg_class_t rclass ATTRIBUTE_UNUSED,
4856 bool in ATTRIBUTE_UNUSED)
4857 {
4858 return aarch64_tune_params->memmov_cost;
4859 }
4860
4861 /* Vectorizer cost model target hooks. */
4862
4863 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4864 static int
4865 aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4866 tree vectype,
4867 int misalign ATTRIBUTE_UNUSED)
4868 {
4869 unsigned elements;
4870
4871 switch (type_of_cost)
4872 {
4873 case scalar_stmt:
4874 return aarch64_tune_params->vec_costs->scalar_stmt_cost;
4875
4876 case scalar_load:
4877 return aarch64_tune_params->vec_costs->scalar_load_cost;
4878
4879 case scalar_store:
4880 return aarch64_tune_params->vec_costs->scalar_store_cost;
4881
4882 case vector_stmt:
4883 return aarch64_tune_params->vec_costs->vec_stmt_cost;
4884
4885 case vector_load:
4886 return aarch64_tune_params->vec_costs->vec_align_load_cost;
4887
4888 case vector_store:
4889 return aarch64_tune_params->vec_costs->vec_store_cost;
4890
4891 case vec_to_scalar:
4892 return aarch64_tune_params->vec_costs->vec_to_scalar_cost;
4893
4894 case scalar_to_vec:
4895 return aarch64_tune_params->vec_costs->scalar_to_vec_cost;
4896
4897 case unaligned_load:
4898 return aarch64_tune_params->vec_costs->vec_unalign_load_cost;
4899
4900 case unaligned_store:
4901 return aarch64_tune_params->vec_costs->vec_unalign_store_cost;
4902
4903 case cond_branch_taken:
4904 return aarch64_tune_params->vec_costs->cond_taken_branch_cost;
4905
4906 case cond_branch_not_taken:
4907 return aarch64_tune_params->vec_costs->cond_not_taken_branch_cost;
4908
4909 case vec_perm:
4910 case vec_promote_demote:
4911 return aarch64_tune_params->vec_costs->vec_stmt_cost;
4912
4913 case vec_construct:
4914 elements = TYPE_VECTOR_SUBPARTS (vectype);
4915 return elements / 2 + 1;
4916
4917 default:
4918 gcc_unreachable ();
4919 }
4920 }
4921
4922 /* Implement targetm.vectorize.add_stmt_cost. */
4923 static unsigned
4924 aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4925 struct _stmt_vec_info *stmt_info, int misalign,
4926 enum vect_cost_model_location where)
4927 {
4928 unsigned *cost = (unsigned *) data;
4929 unsigned retval = 0;
4930
4931 if (flag_vect_cost_model)
4932 {
4933 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4934 int stmt_cost =
4935 aarch64_builtin_vectorization_cost (kind, vectype, misalign);
4936
4937 /* Statements in an inner loop relative to the loop being
4938 vectorized are weighted more heavily. The value here is
4939 a function (linear for now) of the loop nest level. */
4940 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4941 {
4942 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4943 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
4944 unsigned nest_level = loop_depth (loop);
4945
4946 count *= nest_level;
4947 }
4948
4949 retval = (unsigned) (count * stmt_cost);
4950 cost[where] += retval;
4951 }
4952
4953 return retval;
4954 }
4955
4956 static void initialize_aarch64_code_model (void);
4957
4958 /* Parse the architecture extension string. */
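/* STR is the "+ext..." tail of an -march= or -mcpu= option, for example
   "+fp+nosimd": each "+name" token turns the named feature's ISA flag
   bits on, while a "no" prefix (as in "+nosimd") turns the
   corresponding bits off, using the all_extensions table.  */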
4959
4960 static void
4961 aarch64_parse_extension (char *str)
4962 {
4963 /* The extension string is parsed left to right. */
4964 const struct aarch64_option_extension *opt = NULL;
4965
4966 /* Flag to say whether we are adding or removing an extension. */
4967 int adding_ext = -1;
4968
4969 while (str != NULL && *str != 0)
4970 {
4971 char *ext;
4972 size_t len;
4973
4974 str++;
4975 ext = strchr (str, '+');
4976
4977 if (ext != NULL)
4978 len = ext - str;
4979 else
4980 len = strlen (str);
4981
4982 if (len >= 2 && strncmp (str, "no", 2) == 0)
4983 {
4984 adding_ext = 0;
4985 len -= 2;
4986 str += 2;
4987 }
4988 else if (len > 0)
4989 adding_ext = 1;
4990
4991 if (len == 0)
4992 {
4993 error ("missing feature modifier after %qs", "+no");
4994 return;
4995 }
4996
4997 /* Scan over the extensions table trying to find an exact match. */
4998 for (opt = all_extensions; opt->name != NULL; opt++)
4999 {
5000 if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
5001 {
5002 /* Add or remove the extension. */
5003 if (adding_ext)
5004 aarch64_isa_flags |= opt->flags_on;
5005 else
5006 aarch64_isa_flags &= ~(opt->flags_off);
5007 break;
5008 }
5009 }
5010
5011 if (opt->name == NULL)
5012 {
5013 /* Extension not found in list. */
5014 error ("unknown feature modifier %qs", str);
5015 return;
5016 }
5017
5018 str = ext;
5019 }
5020
5021 return;
5022 }
5023
5024 /* Parse the ARCH string. */
5025
5026 static void
5027 aarch64_parse_arch (void)
5028 {
5029 char *ext;
5030 const struct processor *arch;
5031 char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
5032 size_t len;
5033
5034 strcpy (str, aarch64_arch_string);
5035
5036 ext = strchr (str, '+');
5037
5038 if (ext != NULL)
5039 len = ext - str;
5040 else
5041 len = strlen (str);
5042
5043 if (len == 0)
5044 {
5045 error ("missing arch name in -march=%qs", str);
5046 return;
5047 }
5048
5049 /* Loop through the list of supported ARCHs to find a match. */
5050 for (arch = all_architectures; arch->name != NULL; arch++)
5051 {
5052 if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
5053 {
5054 selected_arch = arch;
5055 aarch64_isa_flags = selected_arch->flags;
5056 selected_cpu = &all_cores[selected_arch->core];
5057
5058 if (ext != NULL)
5059 {
5060 /* ARCH string contains at least one extension. */
5061 aarch64_parse_extension (ext);
5062 }
5063
5064 return;
5065 }
5066 }
5067
5068 /* ARCH name not found in list. */
5069 error ("unknown value %qs for -march", str);
5070 return;
5071 }
5072
5073 /* Parse the CPU string. */
5074
5075 static void
5076 aarch64_parse_cpu (void)
5077 {
5078 char *ext;
5079 const struct processor *cpu;
5080 char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
5081 size_t len;
5082
5083 strcpy (str, aarch64_cpu_string);
5084
5085 ext = strchr (str, '+');
5086
5087 if (ext != NULL)
5088 len = ext - str;
5089 else
5090 len = strlen (str);
5091
5092 if (len == 0)
5093 {
5094 error ("missing cpu name in -mcpu=%qs", str);
5095 return;
5096 }
5097
5098 /* Loop through the list of supported CPUs to find a match. */
5099 for (cpu = all_cores; cpu->name != NULL; cpu++)
5100 {
5101 if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
5102 {
5103 selected_cpu = cpu;
5104 aarch64_isa_flags = selected_cpu->flags;
5105
5106 if (ext != NULL)
5107 {
5108 /* CPU string contains at least one extension. */
5109 aarch64_parse_extension (ext);
5110 }
5111
5112 return;
5113 }
5114 }
5115
5116 /* CPU name not found in list. */
5117 error ("unknown value %qs for -mcpu", str);
5118 return;
5119 }
5120
5121 /* Parse the TUNE string. */
5122
5123 static void
5124 aarch64_parse_tune (void)
5125 {
5126 const struct processor *cpu;
5127 char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
5128 strcpy (str, aarch64_tune_string);
5129
5130 /* Loop through the list of supported CPUs to find a match. */
5131 for (cpu = all_cores; cpu->name != NULL; cpu++)
5132 {
5133 if (strcmp (cpu->name, str) == 0)
5134 {
5135 selected_tune = cpu;
5136 return;
5137 }
5138 }
5139
5140 /* CPU name not found in list. */
5141 error ("unknown value %qs for -mtune", str);
5142 return;
5143 }
5144
5145
5146 /* Implement TARGET_OPTION_OVERRIDE. */
5147
5148 static void
5149 aarch64_override_options (void)
5150 {
5151 /* march wins over mcpu, so when march is defined, mcpu takes the same value,
5152 otherwise march remains undefined. mtune can be used with either march or
5153 mcpu. */
5154
5155 if (aarch64_arch_string)
5156 {
5157 aarch64_parse_arch ();
5158 aarch64_cpu_string = NULL;
5159 }
5160
5161 if (aarch64_cpu_string)
5162 {
5163 aarch64_parse_cpu ();
5164 selected_arch = NULL;
5165 }
5166
5167 if (aarch64_tune_string)
5168 {
5169 aarch64_parse_tune ();
5170 }
5171
5172 initialize_aarch64_code_model ();
5173
5174 aarch64_build_bitmask_table ();
5175
5176 /* This target defaults to strict volatile bitfields. */
5177 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
5178 flag_strict_volatile_bitfields = 1;
5179
5180 /* If the user did not specify a processor, choose the default
5181 one for them. This will be the CPU set during configuration using
5182 --with-cpu, otherwise it is "generic". */
5183 if (!selected_cpu)
5184 {
5185 selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
5186 aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
5187 }
5188
5189 gcc_assert (selected_cpu);
5190
5191 /* The selected cpu may be an architecture, so lookup tuning by core ID. */
5192 if (!selected_tune)
5193 selected_tune = &all_cores[selected_cpu->core];
5194
5195 aarch64_tune_flags = selected_tune->flags;
5196 aarch64_tune = selected_tune->core;
5197 aarch64_tune_params = selected_tune->tune;
5198
5199 aarch64_override_options_after_change ();
5200 }
5201
5202 /* Implement targetm.override_options_after_change. */
5203
5204 static void
5205 aarch64_override_options_after_change (void)
5206 {
5207 faked_omit_frame_pointer = false;
5208
5209 /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so
5210 that aarch64_frame_pointer_required will be called. We need to remember
5211 whether flag_omit_frame_pointer was turned on normally or just faked. */
5212
5213 if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer)
5214 {
5215 flag_omit_frame_pointer = true;
5216 faked_omit_frame_pointer = true;
5217 }
5218 }
5219
5220 static struct machine_function *
5221 aarch64_init_machine_status (void)
5222 {
5223 struct machine_function *machine;
5224 machine = ggc_alloc_cleared_machine_function ();
5225 return machine;
5226 }
5227
5228 void
5229 aarch64_init_expanders (void)
5230 {
5231 init_machine_status = aarch64_init_machine_status;
5232 }
5233
5234 /* Select the code model to use, reconciling -mcmodel with -fpic/-fPIC. */
5235 static void
5236 initialize_aarch64_code_model (void)
5237 {
5238 if (flag_pic)
5239 {
5240 switch (aarch64_cmodel_var)
5241 {
5242 case AARCH64_CMODEL_TINY:
5243 aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
5244 break;
5245 case AARCH64_CMODEL_SMALL:
5246 aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
5247 break;
5248 case AARCH64_CMODEL_LARGE:
5249 sorry ("code model %qs with -f%s", "large",
5250 flag_pic > 1 ? "PIC" : "pic");
5251 default:
5252 gcc_unreachable ();
5253 }
5254 }
5255 else
5256 aarch64_cmodel = aarch64_cmodel_var;
5257 }
5258
5259 /* Return true if SYMBOL_REF X binds locally. */
5260
5261 static bool
5262 aarch64_symbol_binds_local_p (const_rtx x)
5263 {
5264 return (SYMBOL_REF_DECL (x)
5265 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
5266 : SYMBOL_REF_LOCAL_P (x));
5267 }
5268
5269 /* Return true if SYMBOL_REF X is thread-local. */
5270 static bool
5271 aarch64_tls_symbol_p (rtx x)
5272 {
5273 if (! TARGET_HAVE_TLS)
5274 return false;
5275
5276 if (GET_CODE (x) != SYMBOL_REF)
5277 return false;
5278
5279 return SYMBOL_REF_TLS_MODEL (x) != 0;
5280 }
5281
5282 /* Classify a TLS symbol into one of the TLS kinds. */
5283 enum aarch64_symbol_type
5284 aarch64_classify_tls_symbol (rtx x)
5285 {
5286 enum tls_model tls_kind = tls_symbolic_operand_type (x);
5287
5288 switch (tls_kind)
5289 {
5290 case TLS_MODEL_GLOBAL_DYNAMIC:
5291 case TLS_MODEL_LOCAL_DYNAMIC:
5292 return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
5293
5294 case TLS_MODEL_INITIAL_EXEC:
5295 return SYMBOL_SMALL_GOTTPREL;
5296
5297 case TLS_MODEL_LOCAL_EXEC:
5298 return SYMBOL_SMALL_TPREL;
5299
5300 case TLS_MODEL_EMULATED:
5301 case TLS_MODEL_NONE:
5302 return SYMBOL_FORCE_TO_MEM;
5303
5304 default:
5305 gcc_unreachable ();
5306 }
5307 }
5308
5309 /* Return the method that should be used to access SYMBOL_REF or
5310 LABEL_REF X in context CONTEXT. */
5311
5312 enum aarch64_symbol_type
5313 aarch64_classify_symbol (rtx x,
5314 enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
5315 {
5316 if (GET_CODE (x) == LABEL_REF)
5317 {
5318 switch (aarch64_cmodel)
5319 {
5320 case AARCH64_CMODEL_LARGE:
5321 return SYMBOL_FORCE_TO_MEM;
5322
5323 case AARCH64_CMODEL_TINY_PIC:
5324 case AARCH64_CMODEL_TINY:
5325 return SYMBOL_TINY_ABSOLUTE;
5326
5327 case AARCH64_CMODEL_SMALL_PIC:
5328 case AARCH64_CMODEL_SMALL:
5329 return SYMBOL_SMALL_ABSOLUTE;
5330
5331 default:
5332 gcc_unreachable ();
5333 }
5334 }
5335
5336 if (GET_CODE (x) == SYMBOL_REF)
5337 {
5338 if (aarch64_cmodel == AARCH64_CMODEL_LARGE
5339 || CONSTANT_POOL_ADDRESS_P (x))
5340 return SYMBOL_FORCE_TO_MEM;
5341
5342 if (aarch64_tls_symbol_p (x))
5343 return aarch64_classify_tls_symbol (x);
5344
5345 switch (aarch64_cmodel)
5346 {
5347 case AARCH64_CMODEL_TINY:
5348 if (SYMBOL_REF_WEAK (x))
5349 return SYMBOL_FORCE_TO_MEM;
5350 return SYMBOL_TINY_ABSOLUTE;
5351
5352 case AARCH64_CMODEL_SMALL:
5353 if (SYMBOL_REF_WEAK (x))
5354 return SYMBOL_FORCE_TO_MEM;
5355 return SYMBOL_SMALL_ABSOLUTE;
5356
5357 case AARCH64_CMODEL_TINY_PIC:
5358 if (!aarch64_symbol_binds_local_p (x))
5359 return SYMBOL_TINY_GOT;
5360 return SYMBOL_TINY_ABSOLUTE;
5361
5362 case AARCH64_CMODEL_SMALL_PIC:
5363 if (!aarch64_symbol_binds_local_p (x))
5364 return SYMBOL_SMALL_GOT;
5365 return SYMBOL_SMALL_ABSOLUTE;
5366
5367 default:
5368 gcc_unreachable ();
5369 }
5370 }
5371
5372 /* By default push everything into the constant pool. */
5373 return SYMBOL_FORCE_TO_MEM;
5374 }
5375
5376 bool
5377 aarch64_constant_address_p (rtx x)
5378 {
5379 return (CONSTANT_P (x) && memory_address_p (DImode, x));
5380 }
5381
5382 bool
5383 aarch64_legitimate_pic_operand_p (rtx x)
5384 {
5385 if (GET_CODE (x) == SYMBOL_REF
5386 || (GET_CODE (x) == CONST
5387 && GET_CODE (XEXP (x, 0)) == PLUS
5388 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
5389 return false;
5390
5391 return true;
5392 }
5393
5394 /* Return true if X holds a floating-point constant that is either
5395 representable as a quarter-precision (FMOV) immediate or is +0.0. */
5396 static bool
5397 aarch64_valid_floating_const (enum machine_mode mode, rtx x)
5398 {
5399 if (!CONST_DOUBLE_P (x))
5400 return false;
5401
5402 /* TODO: We could handle moving 0.0 to a TFmode register,
5403 but first we would like to refactor the movtf_aarch64
5404 pattern to be more amenable to splitting moves properly and
5405 to gating correctly on TARGET_SIMD. For now, reject all
5406 constants that are not destined for SFmode or DFmode registers. */
5407 if (!(mode == SFmode || mode == DFmode))
5408 return false;
5409
5410 if (aarch64_float_const_zero_rtx_p (x))
5411 return true;
5412 return aarch64_float_const_representable_p (x);
5413 }
5414
5415 static bool
5416 aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
5417 {
5418 /* Do not allow vector struct mode constants. We could support
5419 0 and -1 easily, but they need support in aarch64-simd.md. */
5420 if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
5421 return false;
5422
5423 /* This could probably go away because
5424 we now decompose CONST_INTs according to expand_mov_immediate. */
5425 if ((GET_CODE (x) == CONST_VECTOR
5426 && aarch64_simd_valid_immediate (x, mode, false, NULL))
5427 || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
5428 return !targetm.cannot_force_const_mem (mode, x);
5429
5430 if (GET_CODE (x) == HIGH
5431 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
5432 return true;
5433
5434 return aarch64_constant_address_p (x);
5435 }
5436
5437 rtx
5438 aarch64_load_tp (rtx target)
5439 {
5440 if (!target
5441 || GET_MODE (target) != Pmode
5442 || !register_operand (target, Pmode))
5443 target = gen_reg_rtx (Pmode);
5444
5445 /* Can return in any reg. */
5446 emit_insn (gen_aarch64_load_tp_hard (target));
5447 return target;
5448 }
5449
5450 /* On AAPCS systems, this is the "struct __va_list". */
5451 static GTY(()) tree va_list_type;
5452
5453 /* Implement TARGET_BUILD_BUILTIN_VA_LIST.
5454 Return the type to use as __builtin_va_list.
5455
5456 AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
5457
5458 struct __va_list
5459 {
5460 void *__stack;
5461 void *__gr_top;
5462 void *__vr_top;
5463 int __gr_offs;
5464 int __vr_offs;
5465 }; */
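/* __gr_offs and __vr_offs hold the (negative) byte offsets from
   __gr_top and __vr_top of the next general-register and FP/SIMD
   register save slots; va_start below initializes them to minus the
   size of the corresponding save area, and va_arg falls back to
   __stack once they become non-negative (all registers consumed).  */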
5466
5467 static tree
5468 aarch64_build_builtin_va_list (void)
5469 {
5470 tree va_list_name;
5471 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
5472
5473 /* Create the type. */
5474 va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
5475 /* Give it the required name. */
5476 va_list_name = build_decl (BUILTINS_LOCATION,
5477 TYPE_DECL,
5478 get_identifier ("__va_list"),
5479 va_list_type);
5480 DECL_ARTIFICIAL (va_list_name) = 1;
5481 TYPE_NAME (va_list_type) = va_list_name;
5482 TYPE_STUB_DECL (va_list_type) = va_list_name;
5483
5484 /* Create the fields. */
5485 f_stack = build_decl (BUILTINS_LOCATION,
5486 FIELD_DECL, get_identifier ("__stack"),
5487 ptr_type_node);
5488 f_grtop = build_decl (BUILTINS_LOCATION,
5489 FIELD_DECL, get_identifier ("__gr_top"),
5490 ptr_type_node);
5491 f_vrtop = build_decl (BUILTINS_LOCATION,
5492 FIELD_DECL, get_identifier ("__vr_top"),
5493 ptr_type_node);
5494 f_groff = build_decl (BUILTINS_LOCATION,
5495 FIELD_DECL, get_identifier ("__gr_offs"),
5496 integer_type_node);
5497 f_vroff = build_decl (BUILTINS_LOCATION,
5498 FIELD_DECL, get_identifier ("__vr_offs"),
5499 integer_type_node);
5500
5501 DECL_ARTIFICIAL (f_stack) = 1;
5502 DECL_ARTIFICIAL (f_grtop) = 1;
5503 DECL_ARTIFICIAL (f_vrtop) = 1;
5504 DECL_ARTIFICIAL (f_groff) = 1;
5505 DECL_ARTIFICIAL (f_vroff) = 1;
5506
5507 DECL_FIELD_CONTEXT (f_stack) = va_list_type;
5508 DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
5509 DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
5510 DECL_FIELD_CONTEXT (f_groff) = va_list_type;
5511 DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
5512
5513 TYPE_FIELDS (va_list_type) = f_stack;
5514 DECL_CHAIN (f_stack) = f_grtop;
5515 DECL_CHAIN (f_grtop) = f_vrtop;
5516 DECL_CHAIN (f_vrtop) = f_groff;
5517 DECL_CHAIN (f_groff) = f_vroff;
5518
5519 /* Compute its layout. */
5520 layout_type (va_list_type);
5521
5522 return va_list_type;
5523 }
5524
5525 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5526 static void
5527 aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
5528 {
5529 const CUMULATIVE_ARGS *cum;
5530 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
5531 tree stack, grtop, vrtop, groff, vroff;
5532 tree t;
5533 int gr_save_area_size;
5534 int vr_save_area_size;
5535 int vr_offset;
5536
5537 cum = &crtl->args.info;
5538 gr_save_area_size
5539 = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
5540 vr_save_area_size
5541 = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
5542
5543 if (TARGET_GENERAL_REGS_ONLY)
5544 {
5545 if (cum->aapcs_nvrn > 0)
5546 sorry ("%qs and floating point or vector arguments",
5547 "-mgeneral-regs-only");
5548 vr_save_area_size = 0;
5549 }
5550
5551 f_stack = TYPE_FIELDS (va_list_type_node);
5552 f_grtop = DECL_CHAIN (f_stack);
5553 f_vrtop = DECL_CHAIN (f_grtop);
5554 f_groff = DECL_CHAIN (f_vrtop);
5555 f_vroff = DECL_CHAIN (f_groff);
5556
5557 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
5558 NULL_TREE);
5559 grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
5560 NULL_TREE);
5561 vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
5562 NULL_TREE);
5563 groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
5564 NULL_TREE);
5565 vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
5566 NULL_TREE);
5567
5568 /* Emit code to initialize STACK, which points to the next varargs stack
5569 argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
5570 by named arguments. STACK is 8-byte aligned. */
5571 t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
5572 if (cum->aapcs_stack_size > 0)
5573 t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
5574 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
5575 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5576
5577 /* Emit code to initialize GRTOP, the top of the GR save area.
5578 virtual_incoming_args_rtx should have been 16 byte aligned. */
5579 t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
5580 t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
5581 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5582
5583 /* Emit code to initialize VRTOP, the top of the VR save area.
5584 This address is gr_save_area_bytes below GRTOP, rounded
5585 down to the next 16-byte boundary. */
5586 t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
5587 vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
5588 STACK_BOUNDARY / BITS_PER_UNIT);
5589
5590 if (vr_offset)
5591 t = fold_build_pointer_plus_hwi (t, -vr_offset);
5592 t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
5593 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5594
5595 /* Emit code to initialize GROFF, the offset from GRTOP of the
5596 next GPR argument. */
5597 t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
5598 build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
5599 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5600
5601 /* Likewise emit code to initialize VROFF, the offset from VRTOP
5602 of the next VR argument. */
5603 t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
5604 build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
5605 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5606 }
5607
5608 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5609
5610 static tree
5611 aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5612 gimple_seq *post_p ATTRIBUTE_UNUSED)
5613 {
5614 tree addr;
5615 bool indirect_p;
5616 bool is_ha; /* is HFA or HVA. */
5617 bool dw_align; /* double-word align. */
5618 enum machine_mode ag_mode = VOIDmode;
5619 int nregs;
5620 enum machine_mode mode;
5621
5622 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
5623 tree stack, f_top, f_off, off, arg, roundup, on_stack;
5624 HOST_WIDE_INT size, rsize, adjust, align;
5625 tree t, u, cond1, cond2;
5626
5627 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5628 if (indirect_p)
5629 type = build_pointer_type (type);
5630
5631 mode = TYPE_MODE (type);
5632
5633 f_stack = TYPE_FIELDS (va_list_type_node);
5634 f_grtop = DECL_CHAIN (f_stack);
5635 f_vrtop = DECL_CHAIN (f_grtop);
5636 f_groff = DECL_CHAIN (f_vrtop);
5637 f_vroff = DECL_CHAIN (f_groff);
5638
5639 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
5640 f_stack, NULL_TREE);
5641 size = int_size_in_bytes (type);
5642 align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
5643
5644 dw_align = false;
5645 adjust = 0;
5646 if (aarch64_vfp_is_call_or_return_candidate (mode,
5647 type,
5648 &ag_mode,
5649 &nregs,
5650 &is_ha))
5651 {
5652 /* TYPE passed in fp/simd registers. */
5653 if (TARGET_GENERAL_REGS_ONLY)
5654 sorry ("%qs and floating point or vector arguments",
5655 "-mgeneral-regs-only");
5656
5657 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
5658 unshare_expr (valist), f_vrtop, NULL_TREE);
5659 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
5660 unshare_expr (valist), f_vroff, NULL_TREE);
5661
5662 rsize = nregs * UNITS_PER_VREG;
5663
5664 if (is_ha)
5665 {
5666 if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
5667 adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
5668 }
5669 else if (BLOCK_REG_PADDING (mode, type, 1) == downward
5670 && size < UNITS_PER_VREG)
5671 {
5672 adjust = UNITS_PER_VREG - size;
5673 }
5674 }
5675 else
5676 {
5677 /* TYPE passed in general registers. */
5678 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
5679 unshare_expr (valist), f_grtop, NULL_TREE);
5680 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
5681 unshare_expr (valist), f_groff, NULL_TREE);
5682 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5683 nregs = rsize / UNITS_PER_WORD;
5684
5685 if (align > 8)
5686 dw_align = true;
5687
5688 if (BLOCK_REG_PADDING (mode, type, 1) == downward
5689 && size < UNITS_PER_WORD)
5690 {
5691 adjust = UNITS_PER_WORD - size;
5692 }
5693 }
5694
5695 /* Get a local temporary for the field value. */
5696 off = get_initialized_tmp_var (f_off, pre_p, NULL);
5697
5698 /* Emit code to branch if off >= 0. */
5699 t = build2 (GE_EXPR, boolean_type_node, off,
5700 build_int_cst (TREE_TYPE (off), 0));
5701 cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5702
5703 if (dw_align)
5704 {
5705 /* Emit: offs = (offs + 15) & -16. */
5706 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
5707 build_int_cst (TREE_TYPE (off), 15));
5708 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
5709 build_int_cst (TREE_TYPE (off), -16));
5710 roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
5711 }
5712 else
5713 roundup = NULL;
5714
5715 /* Update ap.__[g|v]r_offs */
5716 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
5717 build_int_cst (TREE_TYPE (off), rsize));
5718 t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
5719
5720 /* String up. */
5721 if (roundup)
5722 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
5723
5724 /* [cond2] if (ap.__[g|v]r_offs > 0) */
5725 u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
5726 build_int_cst (TREE_TYPE (f_off), 0));
5727 cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
5728
5729 /* String up: make sure the assignment happens before the use. */
5730 t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
5731 COND_EXPR_ELSE (cond1) = t;
5732
5733 /* Prepare the trees handling the argument that is passed on the stack;
5734 the top level node will store in ON_STACK. */
5735 arg = get_initialized_tmp_var (stack, pre_p, NULL);
5736 if (align > 8)
5737 {
5738 /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
5739 t = fold_convert (intDI_type_node, arg);
5740 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
5741 build_int_cst (TREE_TYPE (t), 15));
5742 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5743 build_int_cst (TREE_TYPE (t), -16));
5744 t = fold_convert (TREE_TYPE (arg), t);
5745 roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
5746 }
5747 else
5748 roundup = NULL;
5749 /* Advance ap.__stack */
5750 t = fold_convert (intDI_type_node, arg);
5751 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
5752 build_int_cst (TREE_TYPE (t), size + 7));
5753 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5754 build_int_cst (TREE_TYPE (t), -8));
5755 t = fold_convert (TREE_TYPE (arg), t);
5756 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
5757 /* String up roundup and advance. */
5758 if (roundup)
5759 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
5760 /* String up with arg */
5761 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
5762 /* Big-endianness related address adjustment. */
5763 if (BLOCK_REG_PADDING (mode, type, 1) == downward
5764 && size < UNITS_PER_WORD)
5765 {
5766 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
5767 size_int (UNITS_PER_WORD - size));
5768 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
5769 }
5770
5771 COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
5772 COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
5773
5774 /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
5775 t = off;
5776 if (adjust)
5777 t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
5778 build_int_cst (TREE_TYPE (off), adjust));
5779
5780 t = fold_convert (sizetype, t);
5781 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
5782
5783 if (is_ha)
5784 {
5785 /* type ha; // treat as "struct {ftype field[n];}"
5786 ... [computing offs]
5787 for (i = 0; i <nregs; ++i, offs += 16)
5788 ha.field[i] = *((ftype *)(ap.__vr_top + offs));
5789 return ha; */
5790 int i;
5791 tree tmp_ha, field_t, field_ptr_t;
5792
5793 /* Declare a local variable. */
5794 tmp_ha = create_tmp_var_raw (type, "ha");
5795 gimple_add_tmp_var (tmp_ha);
5796
5797 /* Establish the base type. */
5798 switch (ag_mode)
5799 {
5800 case SFmode:
5801 field_t = float_type_node;
5802 field_ptr_t = float_ptr_type_node;
5803 break;
5804 case DFmode:
5805 field_t = double_type_node;
5806 field_ptr_t = double_ptr_type_node;
5807 break;
5808 case TFmode:
5809 field_t = long_double_type_node;
5810 field_ptr_t = long_double_ptr_type_node;
5811 break;
5812 /* Half-precision and quad-precision floating point are not fully
5813 supported yet. Enable the following code once support is complete;
5814 the correct type node for __fp16 * still needs to be found. */
5815 #if 0
5816 case HFmode:
5817 field_t = float_type_node;
5818 field_ptr_t = float_ptr_type_node;
5819 break;
5820 #endif
5821 case V2SImode:
5822 case V4SImode:
5823 {
5824 tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
5825 field_t = build_vector_type_for_mode (innertype, ag_mode);
5826 field_ptr_t = build_pointer_type (field_t);
5827 }
5828 break;
5829 default:
5830 gcc_assert (0);
5831 }
5832
5833 /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area) */
5834 tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
5835 addr = t;
5836 t = fold_convert (field_ptr_t, addr);
5837 t = build2 (MODIFY_EXPR, field_t,
5838 build1 (INDIRECT_REF, field_t, tmp_ha),
5839 build1 (INDIRECT_REF, field_t, t));
5840
5841 /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
5842 for (i = 1; i < nregs; ++i)
5843 {
5844 addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
5845 u = fold_convert (field_ptr_t, addr);
5846 u = build2 (MODIFY_EXPR, field_t,
5847 build2 (MEM_REF, field_t, tmp_ha,
5848 build_int_cst (field_ptr_t,
5849 (i *
5850 int_size_in_bytes (field_t)))),
5851 build1 (INDIRECT_REF, field_t, u));
5852 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
5853 }
5854
5855 u = fold_convert (TREE_TYPE (f_top), tmp_ha);
5856 t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
5857 }
5858
5859 COND_EXPR_ELSE (cond2) = t;
5860 addr = fold_convert (build_pointer_type (type), cond1);
5861 addr = build_va_arg_indirect_ref (addr);
5862
5863 if (indirect_p)
5864 addr = build_va_arg_indirect_ref (addr);
5865
5866 return addr;
5867 }
5868
5869 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
5870
5871 static void
5872 aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
5873 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5874 int no_rtl)
5875 {
5876 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5877 CUMULATIVE_ARGS local_cum;
5878 int gr_saved, vr_saved;
5879
5880 /* The caller has advanced CUM up to, but not beyond, the last named
5881 argument. Advance a local copy of CUM past the last "real" named
5882 argument, to find out how many registers are left over. */
5883 local_cum = *cum;
5884 aarch64_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, true);
5885
5886 /* Find out how many registers we need to save.  */
5887 gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
5888 vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
5889
5890 if (TARGET_GENERAL_REGS_ONLY)
5891 {
5892 if (local_cum.aapcs_nvrn > 0)
5893 sorry ("%qs and floating point or vector arguments",
5894 "-mgeneral-regs-only");
5895 vr_saved = 0;
5896 }
5897
5898 if (!no_rtl)
5899 {
5900 if (gr_saved > 0)
5901 {
5902 rtx ptr, mem;
5903
5904 /* virtual_incoming_args_rtx should have been 16-byte aligned. */
5905 ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
5906 - gr_saved * UNITS_PER_WORD);
5907 mem = gen_frame_mem (BLKmode, ptr);
5908 set_mem_alias_set (mem, get_varargs_alias_set ());
5909
5910 move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
5911 mem, gr_saved);
5912 }
5913 if (vr_saved > 0)
5914 {
5915 /* We can't use move_block_from_reg, because it will use
5916 the wrong mode, storing D regs only. */
5917 enum machine_mode mode = TImode;
5918 int off, i;
5919
5920 /* Set OFF to the offset from virtual_incoming_args_rtx of
5921 the first vector register. The VR save area lies below
5922 the GR one, and is aligned to 16 bytes. */
5923 off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
5924 STACK_BOUNDARY / BITS_PER_UNIT);
5925 off -= vr_saved * UNITS_PER_VREG;
5926
5927 for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
5928 {
5929 rtx ptr, mem;
5930
5931 ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
5932 mem = gen_frame_mem (mode, ptr);
5933 set_mem_alias_set (mem, get_varargs_alias_set ());
5934 aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
5935 off += UNITS_PER_VREG;
5936 }
5937 }
5938 }
5939
5940 /* We don't save the size into *PRETEND_SIZE because we want to avoid
5941 any complication of having crtl->args.pretend_args_size changed. */
5942 cfun->machine->saved_varargs_size
5943 = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
5944 STACK_BOUNDARY / BITS_PER_UNIT)
5945 + vr_saved * UNITS_PER_VREG);
5946 }
5947
5948 static void
5949 aarch64_conditional_register_usage (void)
5950 {
5951 int i;
5952 if (!TARGET_FLOAT)
5953 {
5954 for (i = V0_REGNUM; i <= V31_REGNUM; i++)
5955 {
5956 fixed_regs[i] = 1;
5957 call_used_regs[i] = 1;
5958 }
5959 }
5960 }
5961
5962 /* Walk down the type tree of TYPE counting consecutive base elements.
5963 If *MODEP is VOIDmode, then set it to the first valid floating point
5964 type. If a non-floating point type is found, or if a floating point
5965 type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
5966 otherwise return the count in the sub-tree. */
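/* For example, struct { float x, y, z; } walks to a count of 3 with *MODEP
   set to SFmode, whereas struct { float f; double d; } mixes base modes and
   yields -1.  */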
5967 static int
5968 aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
5969 {
5970 enum machine_mode mode;
5971 HOST_WIDE_INT size;
5972
5973 switch (TREE_CODE (type))
5974 {
5975 case REAL_TYPE:
5976 mode = TYPE_MODE (type);
5977 if (mode != DFmode && mode != SFmode && mode != TFmode)
5978 return -1;
5979
5980 if (*modep == VOIDmode)
5981 *modep = mode;
5982
5983 if (*modep == mode)
5984 return 1;
5985
5986 break;
5987
5988 case COMPLEX_TYPE:
5989 mode = TYPE_MODE (TREE_TYPE (type));
5990 if (mode != DFmode && mode != SFmode && mode != TFmode)
5991 return -1;
5992
5993 if (*modep == VOIDmode)
5994 *modep = mode;
5995
5996 if (*modep == mode)
5997 return 2;
5998
5999 break;
6000
6001 case VECTOR_TYPE:
6002 /* Use V2SImode and V4SImode as representatives of all 64-bit
6003 and 128-bit vector types. */
6004 size = int_size_in_bytes (type);
6005 switch (size)
6006 {
6007 case 8:
6008 mode = V2SImode;
6009 break;
6010 case 16:
6011 mode = V4SImode;
6012 break;
6013 default:
6014 return -1;
6015 }
6016
6017 if (*modep == VOIDmode)
6018 *modep = mode;
6019
6020 /* Vector modes are considered to be opaque: two vectors are
6021 equivalent for the purposes of being homogeneous aggregates
6022 if they are the same size. */
6023 if (*modep == mode)
6024 return 1;
6025
6026 break;
6027
6028 case ARRAY_TYPE:
6029 {
6030 int count;
6031 tree index = TYPE_DOMAIN (type);
6032
6033 /* Can't handle incomplete types. */
6034 if (!COMPLETE_TYPE_P (type))
6035 return -1;
6036
6037 count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
6038 if (count == -1
6039 || !index
6040 || !TYPE_MAX_VALUE (index)
6041 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
6042 || !TYPE_MIN_VALUE (index)
6043 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
6044 || count < 0)
6045 return -1;
6046
6047 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
6048 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
6049
6050 /* There must be no padding. */
6051 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
6052 return -1;
6053
6054 return count;
6055 }
6056
6057 case RECORD_TYPE:
6058 {
6059 int count = 0;
6060 int sub_count;
6061 tree field;
6062
6063 /* Can't handle incomplete types. */
6064 if (!COMPLETE_TYPE_P (type))
6065 return -1;
6066
6067 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6068 {
6069 if (TREE_CODE (field) != FIELD_DECL)
6070 continue;
6071
6072 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
6073 if (sub_count < 0)
6074 return -1;
6075 count += sub_count;
6076 }
6077
6078 /* There must be no padding. */
6079 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
6080 return -1;
6081
6082 return count;
6083 }
6084
6085 case UNION_TYPE:
6086 case QUAL_UNION_TYPE:
6087 {
6088 /* These aren't very interesting except in a degenerate case. */
6089 int count = 0;
6090 int sub_count;
6091 tree field;
6092
6093 /* Can't handle incomplete types. */
6094 if (!COMPLETE_TYPE_P (type))
6095 return -1;
6096
6097 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6098 {
6099 if (TREE_CODE (field) != FIELD_DECL)
6100 continue;
6101
6102 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
6103 if (sub_count < 0)
6104 return -1;
6105 count = count > sub_count ? count : sub_count;
6106 }
6107
6108 /* There must be no padding. */
6109 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
6110 return -1;
6111
6112 return count;
6113 }
6114
6115 default:
6116 break;
6117 }
6118
6119 return -1;
6120 }
6121
6122 /* Return true if we use LRA instead of the reload pass.  */
6123 static bool
6124 aarch64_lra_p (void)
6125 {
6126 return aarch64_lra_flag;
6127 }
6128
6129 /* Return TRUE if the type, as described by TYPE and MODE, is a composite
6130 type as described in AAPCS64 \S 4.3. This includes aggregate, union and
6131 array types. The C99 floating-point complex types are also considered
6132 as composite types, according to AAPCS64 \S 7.1.1. The complex integer
6133 types, which are GCC extensions and out of the scope of AAPCS64, are
6134 treated as composite types here as well.
6135
6136 Note that MODE itself is not sufficient in determining whether a type
6137 is such a composite type or not. This is because
6138 stor-layout.c:compute_record_mode may have already changed the MODE
6139 (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
6140 structure with only one field may have its MODE set to the mode of the
6141 field. Also an integer mode whose size matches the size of the
6142 RECORD_TYPE type may be used to substitute the original mode
6143 (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
6144 solely relied on. */
6145
6146 static bool
6147 aarch64_composite_type_p (const_tree type,
6148 enum machine_mode mode)
6149 {
6150 if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
6151 return true;
6152
6153 if (mode == BLKmode
6154 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6155 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
6156 return true;
6157
6158 return false;
6159 }
6160
6161 /* Return TRUE if the type, as described by TYPE and MODE, is a short vector
6162 type as described in AAPCS64 \S 4.1.2.
6163
6164 See the comment above aarch64_composite_type_p for the notes on MODE. */
6165
6166 static bool
6167 aarch64_short_vector_p (const_tree type,
6168 enum machine_mode mode)
6169 {
6170 HOST_WIDE_INT size = -1;
6171
6172 if (type && TREE_CODE (type) == VECTOR_TYPE)
6173 size = int_size_in_bytes (type);
6174 else if (!aarch64_composite_type_p (type, mode)
6175 && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6176 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
6177 size = GET_MODE_SIZE (mode);
6178
6179 return (size == 8 || size == 16) ? true : false;
6180 }
6181
6182 /* Return TRUE if an argument, whose type is described by TYPE and MODE,
6183 shall be passed or returned in simd/fp register(s) (providing these
6184 parameter passing registers are available).
6185
6186 Upon successful return, *COUNT returns the number of needed registers,
6187 *BASE_MODE returns the mode of the individual register and when IS_HA
6188 is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
6189 floating-point aggregate or a homogeneous short-vector aggregate. */
6190
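/* For example, an aggregate of four floats yields *COUNT == 4 and
   *BASE_MODE == SFmode with *IS_HA set, while a plain double yields
   *COUNT == 1 and *BASE_MODE == DFmode with *IS_HA clear.  */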
6191 static bool
6192 aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
6193 const_tree type,
6194 enum machine_mode *base_mode,
6195 int *count,
6196 bool *is_ha)
6197 {
6198 enum machine_mode new_mode = VOIDmode;
6199 bool composite_p = aarch64_composite_type_p (type, mode);
6200
6201 if (is_ha != NULL) *is_ha = false;
6202
6203 if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
6204 || aarch64_short_vector_p (type, mode))
6205 {
6206 *count = 1;
6207 new_mode = mode;
6208 }
6209 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6210 {
6211 if (is_ha != NULL) *is_ha = true;
6212 *count = 2;
6213 new_mode = GET_MODE_INNER (mode);
6214 }
6215 else if (type && composite_p)
6216 {
6217 int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
6218
6219 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
6220 {
6221 if (is_ha != NULL) *is_ha = true;
6222 *count = ag_count;
6223 }
6224 else
6225 return false;
6226 }
6227 else
6228 return false;
6229
6230 *base_mode = new_mode;
6231 return true;
6232 }
6233
6234 /* Implement TARGET_STRUCT_VALUE_RTX. */
6235
6236 static rtx
6237 aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
6238 int incoming ATTRIBUTE_UNUSED)
6239 {
6240 return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
6241 }
6242
6243 /* Implements target hook vector_mode_supported_p. */
6244 static bool
6245 aarch64_vector_mode_supported_p (enum machine_mode mode)
6246 {
6247 if (TARGET_SIMD
6248 && (mode == V4SImode || mode == V8HImode
6249 || mode == V16QImode || mode == V2DImode
6250 || mode == V2SImode || mode == V4HImode
6251 || mode == V8QImode || mode == V2SFmode
6252 || mode == V4SFmode || mode == V2DFmode))
6253 return true;
6254
6255 return false;
6256 }
6257
6258 /* Return appropriate SIMD container
6259 for MODE within a vector of WIDTH bits. */
6260 static enum machine_mode
6261 aarch64_simd_container_mode (enum machine_mode mode, unsigned width)
6262 {
6263 gcc_assert (width == 64 || width == 128);
6264 if (TARGET_SIMD)
6265 {
6266 if (width == 128)
6267 switch (mode)
6268 {
6269 case DFmode:
6270 return V2DFmode;
6271 case SFmode:
6272 return V4SFmode;
6273 case SImode:
6274 return V4SImode;
6275 case HImode:
6276 return V8HImode;
6277 case QImode:
6278 return V16QImode;
6279 case DImode:
6280 return V2DImode;
6281 default:
6282 break;
6283 }
6284 else
6285 switch (mode)
6286 {
6287 case SFmode:
6288 return V2SFmode;
6289 case SImode:
6290 return V2SImode;
6291 case HImode:
6292 return V4HImode;
6293 case QImode:
6294 return V8QImode;
6295 default:
6296 break;
6297 }
6298 }
6299 return word_mode;
6300 }
6301
6302 /* Return 128-bit container as the preferred SIMD mode for MODE. */
6303 static enum machine_mode
6304 aarch64_preferred_simd_mode (enum machine_mode mode)
6305 {
6306 return aarch64_simd_container_mode (mode, 128);
6307 }
6308
6309 /* Return the bitmask of possible vector sizes for the vectorizer
6310 to iterate over. */
6311 static unsigned int
6312 aarch64_autovectorize_vector_sizes (void)
6313 {
6314 return (16 | 8);
6315 }
6316
6317 /* A table to help perform AArch64-specific name mangling for AdvSIMD
6318 vector types in order to conform to the AAPCS64 (see "Procedure
6319 Call Standard for the ARM 64-bit Architecture", Appendix A). To
6320 qualify for emission with the mangled names defined in that document,
6321 a vector type must not only be of the correct mode but also be
6322 composed of AdvSIMD vector element types (e.g.
6323 __builtin_aarch64_simd_qi); these types are registered by
6324 aarch64_init_simd_builtins (). In other words, vector types defined
6325 in other ways e.g. via vector_size attribute will get default
6326 mangled names. */
6327 typedef struct
6328 {
6329 enum machine_mode mode;
6330 const char *element_type_name;
6331 const char *mangled_name;
6332 } aarch64_simd_mangle_map_entry;
6333
6334 static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
6335 /* 64-bit containerized types. */
6336 { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
6337 { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
6338 { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
6339 { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
6340 { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
6341 { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
6342 { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
6343 { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
6344 { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
6345 /* 128-bit containerized types. */
6346 { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
6347 { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
6348 { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
6349 { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
6350 { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
6351 { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
6352 { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
6353 { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
6354 { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
6355 { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
6356 { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
6357 { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
6358 { VOIDmode, NULL, NULL }
6359 };
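/* For instance, the int8x8_t type from arm_neon.h (whose element type is
   __builtin_aarch64_simd_qi and whose mode is V8QImode) is emitted as
   "10__Int8x8_t", so a C++ function f (int8x8_t) should be mangled as
   _Z1f10__Int8x8_t rather than with the default vector mangling.  */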
6360
6361 /* Implement TARGET_MANGLE_TYPE. */
6362
6363 static const char *
6364 aarch64_mangle_type (const_tree type)
6365 {
6366 /* The AArch64 ABI documents say that "__va_list" has to be
6367 mangled as if it were in the "std" namespace.  */
6368 if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
6369 return "St9__va_list";
6370
6371 /* Check the mode of the vector type, and the name of the vector
6372 element type, against the table. */
6373 if (TREE_CODE (type) == VECTOR_TYPE)
6374 {
6375 aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
6376
6377 while (pos->mode != VOIDmode)
6378 {
6379 tree elt_type = TREE_TYPE (type);
6380
6381 if (pos->mode == TYPE_MODE (type)
6382 && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
6383 && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
6384 pos->element_type_name))
6385 return pos->mangled_name;
6386
6387 pos++;
6388 }
6389 }
6390
6391 /* Use the default mangling. */
6392 return NULL;
6393 }
6394
6395 /* Return the equivalent letter for size. */
6396 static char
6397 sizetochar (int size)
6398 {
6399 switch (size)
6400 {
6401 case 64: return 'd';
6402 case 32: return 's';
6403 case 16: return 'h';
6404 case 8 : return 'b';
6405 default: gcc_unreachable ();
6406 }
6407 }
6408
6409 /* Return true iff X is a uniform vector of floating-point
6410 constants, and the constant can be represented in
6411 quarter-precision form.  Note that, as aarch64_float_const_representable_p
6412 rejects both +0.0 and -0.0, we will also reject them here.  */
6413 static bool
6414 aarch64_vect_float_const_representable_p (rtx x)
6415 {
6416 int i = 0;
6417 REAL_VALUE_TYPE r0, ri;
6418 rtx x0, xi;
6419
6420 if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
6421 return false;
6422
6423 x0 = CONST_VECTOR_ELT (x, 0);
6424 if (!CONST_DOUBLE_P (x0))
6425 return false;
6426
6427 REAL_VALUE_FROM_CONST_DOUBLE (r0, x0);
6428
6429 for (i = 1; i < CONST_VECTOR_NUNITS (x); i++)
6430 {
6431 xi = CONST_VECTOR_ELT (x, i);
6432 if (!CONST_DOUBLE_P (xi))
6433 return false;
6434
6435 REAL_VALUE_FROM_CONST_DOUBLE (ri, xi);
6436 if (!REAL_VALUES_EQUAL (r0, ri))
6437 return false;
6438 }
6439
6440 return aarch64_float_const_representable_p (x0);
6441 }
6442
6443 /* Return true if OP is a valid AdvSIMD immediate for MODE and false otherwise; if INFO is nonnull, fill it in with details of the encoding.  */
6444 bool
6445 aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
6446 struct simd_immediate_info *info)
6447 {
6448 #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
6449 matches = 1; \
6450 for (i = 0; i < idx; i += (STRIDE)) \
6451 if (!(TEST)) \
6452 matches = 0; \
6453 if (matches) \
6454 { \
6455 immtype = (CLASS); \
6456 elsize = (ELSIZE); \
6457 eshift = (SHIFT); \
6458 emvn = (NEG); \
6459 break; \
6460 }
6461
6462 unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
6463 unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
6464 unsigned char bytes[16];
6465 int immtype = -1, matches;
6466 unsigned int invmask = inverse ? 0xff : 0;
6467 int eshift, emvn;
6468
6469 if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
6470 {
6471 if (! (aarch64_simd_imm_zero_p (op, mode)
6472 || aarch64_vect_float_const_representable_p (op)))
6473 return false;
6474
6475 if (info)
6476 {
6477 info->value = CONST_VECTOR_ELT (op, 0);
6478 info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value));
6479 info->mvn = false;
6480 info->shift = 0;
6481 }
6482
6483 return true;
6484 }
6485
6486 /* Splat vector constant out into a byte vector. */
6487 for (i = 0; i < n_elts; i++)
6488 {
6489 rtx el = CONST_VECTOR_ELT (op, i);
6490 unsigned HOST_WIDE_INT elpart;
6491 unsigned int part, parts;
6492
6493 if (GET_CODE (el) == CONST_INT)
6494 {
6495 elpart = INTVAL (el);
6496 parts = 1;
6497 }
6498 else if (GET_CODE (el) == CONST_DOUBLE)
6499 {
6500 elpart = CONST_DOUBLE_LOW (el);
6501 parts = 2;
6502 }
6503 else
6504 gcc_unreachable ();
6505
6506 for (part = 0; part < parts; part++)
6507 {
6508 unsigned int byte;
6509 for (byte = 0; byte < innersize; byte++)
6510 {
6511 bytes[idx++] = (elpart & 0xff) ^ invmask;
6512 elpart >>= BITS_PER_UNIT;
6513 }
6514 if (GET_CODE (el) == CONST_DOUBLE)
6515 elpart = CONST_DOUBLE_HIGH (el);
6516 }
6517 }
6518
6519 /* Sanity check. */
6520 gcc_assert (idx == GET_MODE_SIZE (mode));
6521
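  /* Try each AdvSIMD modified-immediate form in turn: a single byte
     replicated into one position of a 32-bit or 16-bit element (with the
     remaining bytes all zero, or all ones for the MVN forms), the MSL
     "shifted ones" forms, a replicated 8-bit value, and finally the
     64-bit form in which every byte is either 0x00 or 0xff.  The first
     CHECK that matches records the element size, shift and inversion.  */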
6522 do
6523 {
6524 CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
6525 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
6526
6527 CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
6528 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
6529
6530 CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
6531 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
6532
6533 CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
6534 && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
6535
6536 CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
6537
6538 CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
6539
6540 CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
6541 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
6542
6543 CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
6544 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
6545
6546 CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
6547 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
6548
6549 CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
6550 && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
6551
6552 CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
6553
6554 CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
6555
6556 CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
6557 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
6558
6559 CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
6560 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
6561
6562 CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
6563 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
6564
6565 CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
6566 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
6567
6568 CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
6569
6570 CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
6571 && bytes[i] == bytes[(i + 8) % idx], 0, 0);
6572 }
6573 while (0);
6574
6575 if (immtype == -1)
6576 return false;
6577
6578 if (info)
6579 {
6580 info->element_width = elsize;
6581 info->mvn = emvn != 0;
6582 info->shift = eshift;
6583
6584 unsigned HOST_WIDE_INT imm = 0;
6585
6586 if (immtype >= 12 && immtype <= 15)
6587 info->msl = true;
6588
6589 /* Un-invert bytes of recognized vector, if necessary. */
6590 if (invmask != 0)
6591 for (i = 0; i < idx; i++)
6592 bytes[i] ^= invmask;
6593
6594 if (immtype == 17)
6595 {
6596 /* FIXME: Broken on 32-bit H_W_I hosts. */
6597 gcc_assert (sizeof (HOST_WIDE_INT) == 8);
6598
6599 for (i = 0; i < 8; i++)
6600 imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
6601 << (i * BITS_PER_UNIT);
6602
6603
6604 info->value = GEN_INT (imm);
6605 }
6606 else
6607 {
6608 for (i = 0; i < elsize / BITS_PER_UNIT; i++)
6609 imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
6610
6611 /* Construct 'abcdefgh' because the assembler cannot handle
6612 generic constants. */
6613 if (info->mvn)
6614 imm = ~imm;
6615 imm = (imm >> info->shift) & 0xff;
6616 info->value = GEN_INT (imm);
6617 }
6618 }
6619
6620 return true;
6621 #undef CHECK
6622 }
6623
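/* Return true if X is a CONST_VECTOR of integers whose elements are all
   equal and lie in the range [MINVAL, MAXVAL].  */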
6624 static bool
6625 aarch64_const_vec_all_same_int_p (rtx x,
6626 HOST_WIDE_INT minval,
6627 HOST_WIDE_INT maxval)
6628 {
6629 HOST_WIDE_INT firstval;
6630 int count, i;
6631
6632 if (GET_CODE (x) != CONST_VECTOR
6633 || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
6634 return false;
6635
6636 firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
6637 if (firstval < minval || firstval > maxval)
6638 return false;
6639
6640 count = CONST_VECTOR_NUNITS (x);
6641 for (i = 1; i < count; i++)
6642 if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
6643 return false;
6644
6645 return true;
6646 }
6647
6648 /* Check if immediate shift constants are within range.  */
6649 bool
6650 aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
6651 {
6652 int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
6653 if (left)
6654 return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
6655 else
6656 return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
6657 }
6658
6659 /* Return true if X is a uniform vector where all elements
6660 are either the floating-point constant 0.0 or the
6661 integer constant 0. */
6662 bool
6663 aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
6664 {
6665 return x == CONST0_RTX (mode);
6666 }
6667
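/* Return true if each byte of the 64-bit value X is either 0x00 or 0xff.
   Such values can be loaded with a single 64-bit MOVI.  */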
6668 bool
6669 aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
6670 {
6671 HOST_WIDE_INT imm = INTVAL (x);
6672 int i;
6673
6674 for (i = 0; i < 8; i++)
6675 {
6676 unsigned int byte = imm & 0xff;
6677 if (byte != 0xff && byte != 0)
6678 return false;
6679 imm >>= 8;
6680 }
6681
6682 return true;
6683 }
6684
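/* Return true if X is a legitimate source operand for a move in MODE under
   the symbol CONTEXT: a HIGH of a valid symbolic reference, an immediate
   accepted by aarch64_move_imm, a DImode constant-address SYMBOL_REF, or
   an expression classified as SYMBOL_TINY_ABSOLUTE.  */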
6685 bool
6686 aarch64_mov_operand_p (rtx x,
6687 enum aarch64_symbol_context context,
6688 enum machine_mode mode)
6689 {
6690 if (GET_CODE (x) == HIGH
6691 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
6692 return true;
6693
6694 if (CONST_INT_P (x) && aarch64_move_imm (INTVAL (x), mode))
6695 return true;
6696
6697 if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x))
6698 return true;
6699
6700 return aarch64_classify_symbolic_expression (x, context)
6701 == SYMBOL_TINY_ABSOLUTE;
6702 }
6703
6704 /* Return a const_int vector of VAL. */
6705 rtx
6706 aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
6707 {
6708 int nunits = GET_MODE_NUNITS (mode);
6709 rtvec v = rtvec_alloc (nunits);
6710 int i;
6711
6712 for (i=0; i < nunits; i++)
6713 RTVEC_ELT (v, i) = GEN_INT (val);
6714
6715 return gen_rtx_CONST_VECTOR (mode, v);
6716 }
6717
6718 /* Check OP is a legal scalar immediate for the MOVI instruction. */
6719
6720 bool
6721 aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode)
6722 {
6723 enum machine_mode vmode;
6724
6725 gcc_assert (!VECTOR_MODE_P (mode));
6726 vmode = aarch64_preferred_simd_mode (mode);
6727 rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
6728 return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
6729 }
6730
6731 /* Construct and return a PARALLEL RTX vector selecting the lane numbers of the high (if HIGH) or low half of vector mode MODE.  */
6732 rtx
6733 aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
6734 {
6735 int nunits = GET_MODE_NUNITS (mode);
6736 rtvec v = rtvec_alloc (nunits / 2);
6737 int base = high ? nunits / 2 : 0;
6738 rtx t1;
6739 int i;
6740
6741 for (i=0; i < nunits / 2; i++)
6742 RTVEC_ELT (v, i) = GEN_INT (base + i);
6743
6744 t1 = gen_rtx_PARALLEL (mode, v);
6745 return t1;
6746 }
6747
6748 /* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
6749 HIGH (exclusive). */
6750 void
6751 aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
6752 {
6753 HOST_WIDE_INT lane;
6754 gcc_assert (GET_CODE (operand) == CONST_INT);
6755 lane = INTVAL (operand);
6756
6757 if (lane < low || lane >= high)
6758 error ("lane out of range");
6759 }
6760
6761 void
6762 aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
6763 {
6764 gcc_assert (GET_CODE (operand) == CONST_INT);
6765 HOST_WIDE_INT lane = INTVAL (operand);
6766
6767 if (lane < low || lane >= high)
6768 error ("constant out of range");
6769 }
6770
6771 /* Emit code to reinterpret one AdvSIMD type as another,
6772 without altering bits. */
6773 void
6774 aarch64_simd_reinterpret (rtx dest, rtx src)
6775 {
6776 emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
6777 }
6778
6779 /* Emit code to place an AdvSIMD pair result in memory locations (with equal
6780 registers).  */
6781 void
6782 aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
6783 rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
6784 rtx op1)
6785 {
6786 rtx mem = gen_rtx_MEM (mode, destaddr);
6787 rtx tmp1 = gen_reg_rtx (mode);
6788 rtx tmp2 = gen_reg_rtx (mode);
6789
6790 emit_insn (intfn (tmp1, op1, tmp2));
6791
6792 emit_move_insn (mem, tmp1);
6793 mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
6794 emit_move_insn (mem, tmp2);
6795 }
6796
6797 /* Return TRUE if OP is a MEM with a valid vector addressing mode (plain register or post-increment).  */
6798 bool
6799 aarch64_simd_mem_operand_p (rtx op)
6800 {
6801 return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
6802 || GET_CODE (XEXP (op, 0)) == REG);
6803 }
6804
6805 /* Set up OPERANDS for a register copy from SRC to DEST, taking care
6806 not to early-clobber SRC registers in the process.
6807
6808 We assume that the operands described by SRC and DEST represent a
6809 decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
6810 number of components into which the copy has been decomposed. */
6811 void
6812 aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
6813 rtx *src, unsigned int count)
6814 {
6815 unsigned int i;
6816
6817 if (!reg_overlap_mentioned_p (operands[0], operands[1])
6818 || REGNO (operands[0]) < REGNO (operands[1]))
6819 {
6820 for (i = 0; i < count; i++)
6821 {
6822 operands[2 * i] = dest[i];
6823 operands[2 * i + 1] = src[i];
6824 }
6825 }
6826 else
6827 {
6828 for (i = 0; i < count; i++)
6829 {
6830 operands[2 * i] = dest[count - i - 1];
6831 operands[2 * i + 1] = src[count - i - 1];
6832 }
6833 }
6834 }
6835
6836 /* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
6837 one of VSTRUCT modes: OI, CI or XI. */
6838 int
6839 aarch64_simd_attr_length_move (rtx insn)
6840 {
6841 enum machine_mode mode;
6842
6843 extract_insn_cached (insn);
6844
6845 if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
6846 {
6847 mode = GET_MODE (recog_data.operand[0]);
6848 switch (mode)
6849 {
6850 case OImode:
6851 return 8;
6852 case CImode:
6853 return 12;
6854 case XImode:
6855 return 16;
6856 default:
6857 gcc_unreachable ();
6858 }
6859 }
6860 return 4;
6861 }
6862
6863 /* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
6864 alignment of a vector to 128 bits. */
6865 static HOST_WIDE_INT
6866 aarch64_simd_vector_alignment (const_tree type)
6867 {
6868 HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
6869 return MIN (align, 128);
6870 }
6871
6872 /* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
6873 static bool
6874 aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
6875 {
6876 if (is_packed)
6877 return false;
6878
6879 /* We guarantee alignment for vectors up to 128-bits. */
6880 if (tree_int_cst_compare (TYPE_SIZE (type),
6881 bitsize_int (BIGGEST_ALIGNMENT)) > 0)
6882 return false;
6883
6884 /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
6885 return true;
6886 }
6887
6888 /* If VALS is a vector constant that can be loaded into a register
6889 using DUP, generate instructions to do so and return an RTX to
6890 assign to the register. Otherwise return NULL_RTX. */
6891 static rtx
6892 aarch64_simd_dup_constant (rtx vals)
6893 {
6894 enum machine_mode mode = GET_MODE (vals);
6895 enum machine_mode inner_mode = GET_MODE_INNER (mode);
6896 int n_elts = GET_MODE_NUNITS (mode);
6897 bool all_same = true;
6898 rtx x;
6899 int i;
6900
6901 if (GET_CODE (vals) != CONST_VECTOR)
6902 return NULL_RTX;
6903
6904 for (i = 1; i < n_elts; ++i)
6905 {
6906 x = CONST_VECTOR_ELT (vals, i);
6907 if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0)))
6908 all_same = false;
6909 }
6910
6911 if (!all_same)
6912 return NULL_RTX;
6913
6914 /* We can load this constant by using DUP and a constant in a
6915 single general-purpose register.  This will be cheaper than a vector
6916 load.  */
6917 x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0));
6918 return gen_rtx_VEC_DUPLICATE (mode, x);
6919 }
6920
6921
6922 /* Generate code to load VALS, which is a PARALLEL containing only
6923 constants (for vec_init) or CONST_VECTOR, efficiently into a
6924 register. Returns an RTX to copy into the register, or NULL_RTX
6925 for a PARALLEL that can not be converted into a CONST_VECTOR. */
6926 static rtx
6927 aarch64_simd_make_constant (rtx vals)
6928 {
6929 enum machine_mode mode = GET_MODE (vals);
6930 rtx const_dup;
6931 rtx const_vec = NULL_RTX;
6932 int n_elts = GET_MODE_NUNITS (mode);
6933 int n_const = 0;
6934 int i;
6935
6936 if (GET_CODE (vals) == CONST_VECTOR)
6937 const_vec = vals;
6938 else if (GET_CODE (vals) == PARALLEL)
6939 {
6940 /* A CONST_VECTOR must contain only CONST_INTs and
6941 CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
6942 Only store valid constants in a CONST_VECTOR. */
6943 for (i = 0; i < n_elts; ++i)
6944 {
6945 rtx x = XVECEXP (vals, 0, i);
6946 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
6947 n_const++;
6948 }
6949 if (n_const == n_elts)
6950 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6951 }
6952 else
6953 gcc_unreachable ();
6954
6955 if (const_vec != NULL_RTX
6956 && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
6957 /* Load using MOVI/MVNI. */
6958 return const_vec;
6959 else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
6960 /* Loaded using DUP. */
6961 return const_dup;
6962 else if (const_vec != NULL_RTX)
6963 /* Load from constant pool. We can not take advantage of single-cycle
6964 LD1 because we need a PC-relative addressing mode. */
6965 return const_vec;
6966 else
6967 /* A PARALLEL containing something not valid inside CONST_VECTOR.
6968 We can not construct an initializer. */
6969 return NULL_RTX;
6970 }
6971
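/* Expand a vector initialization of TARGET from the elements in VALS.
   Use a single constant load or DUP when possible, patch in a single
   varying element with vec_set, and otherwise build the vector in a
   stack temporary one element at a time.  */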
6972 void
6973 aarch64_expand_vector_init (rtx target, rtx vals)
6974 {
6975 enum machine_mode mode = GET_MODE (target);
6976 enum machine_mode inner_mode = GET_MODE_INNER (mode);
6977 int n_elts = GET_MODE_NUNITS (mode);
6978 int n_var = 0, one_var = -1;
6979 bool all_same = true;
6980 rtx x, mem;
6981 int i;
6982
6983 x = XVECEXP (vals, 0, 0);
6984 if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
6985 n_var = 1, one_var = 0;
6986
6987 for (i = 1; i < n_elts; ++i)
6988 {
6989 x = XVECEXP (vals, 0, i);
6990 if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
6991 ++n_var, one_var = i;
6992
6993 if (!rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6994 all_same = false;
6995 }
6996
6997 if (n_var == 0)
6998 {
6999 rtx constant = aarch64_simd_make_constant (vals);
7000 if (constant != NULL_RTX)
7001 {
7002 emit_move_insn (target, constant);
7003 return;
7004 }
7005 }
7006
7007 /* Splat a single non-constant element if we can. */
7008 if (all_same)
7009 {
7010 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
7011 aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
7012 return;
7013 }
7014
7015 /* One field is non-constant. Load constant then overwrite varying
7016 field. This is more efficient than using the stack. */
7017 if (n_var == 1)
7018 {
7019 rtx copy = copy_rtx (vals);
7020 rtx index = GEN_INT (one_var);
7021 enum insn_code icode;
7022
7023 /* Load constant part of vector, substitute neighboring value for
7024 varying element. */
7025 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1);
7026 aarch64_expand_vector_init (target, copy);
7027
7028 /* Insert variable. */
7029 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
7030 icode = optab_handler (vec_set_optab, mode);
7031 gcc_assert (icode != CODE_FOR_nothing);
7032 emit_insn (GEN_FCN (icode) (target, x, index));
7033 return;
7034 }
7035
7036 /* Construct the vector in memory one field at a time
7037 and load the whole vector. */
7038 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7039 for (i = 0; i < n_elts; i++)
7040 emit_move_insn (adjust_address_nv (mem, inner_mode,
7041 i * GET_MODE_SIZE (inner_mode)),
7042 XVECEXP (vals, 0, i));
7043 emit_move_insn (target, mem);
7044
7045 }
7046
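/* Return the mask applied to shift amounts in MODE: zero for AdvSIMD
   vector and vector-structure modes, where shift amounts are not
   truncated, and the bit width minus one otherwise.  */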
7047 static unsigned HOST_WIDE_INT
7048 aarch64_shift_truncation_mask (enum machine_mode mode)
7049 {
7050 return
7051 (aarch64_vector_mode_supported_p (mode)
7052 || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
7053 }
7054
7055 #ifndef TLS_SECTION_ASM_FLAG
7056 #define TLS_SECTION_ASM_FLAG 'T'
7057 #endif
7058
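/* Emit the assembler directive to switch to section NAME with the
   attributes given by FLAGS, using the abbreviated ".section NAME" form
   when the section has already been declared and is not part of a
   COMDAT group.  */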
7059 void
7060 aarch64_elf_asm_named_section (const char *name, unsigned int flags,
7061 tree decl ATTRIBUTE_UNUSED)
7062 {
7063 char flagchars[10], *f = flagchars;
7064
7065 /* If we have already declared this section, we can use an
7066 abbreviated form to switch back to it -- unless this section is
7067 part of a COMDAT group, in which case GAS requires the full
7068 declaration every time. */
7069 if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
7070 && (flags & SECTION_DECLARED))
7071 {
7072 fprintf (asm_out_file, "\t.section\t%s\n", name);
7073 return;
7074 }
7075
7076 if (!(flags & SECTION_DEBUG))
7077 *f++ = 'a';
7078 if (flags & SECTION_WRITE)
7079 *f++ = 'w';
7080 if (flags & SECTION_CODE)
7081 *f++ = 'x';
7082 if (flags & SECTION_SMALL)
7083 *f++ = 's';
7084 if (flags & SECTION_MERGE)
7085 *f++ = 'M';
7086 if (flags & SECTION_STRINGS)
7087 *f++ = 'S';
7088 if (flags & SECTION_TLS)
7089 *f++ = TLS_SECTION_ASM_FLAG;
7090 if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
7091 *f++ = 'G';
7092 *f = '\0';
7093
7094 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
7095
7096 if (!(flags & SECTION_NOTYPE))
7097 {
7098 const char *type;
7099 const char *format;
7100
7101 if (flags & SECTION_BSS)
7102 type = "nobits";
7103 else
7104 type = "progbits";
7105
7106 #ifdef TYPE_OPERAND_FMT
7107 format = "," TYPE_OPERAND_FMT;
7108 #else
7109 format = ",@%s";
7110 #endif
7111
7112 fprintf (asm_out_file, format, type);
7113
7114 if (flags & SECTION_ENTSIZE)
7115 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
7116 if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
7117 {
7118 if (TREE_CODE (decl) == IDENTIFIER_NODE)
7119 fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
7120 else
7121 fprintf (asm_out_file, ",%s,comdat",
7122 IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
7123 }
7124 }
7125
7126 putc ('\n', asm_out_file);
7127 }
7128
7129 /* Select a format to encode pointers in exception handling data. */
7130 int
7131 aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
7132 {
7133 int type;
7134 switch (aarch64_cmodel)
7135 {
7136 case AARCH64_CMODEL_TINY:
7137 case AARCH64_CMODEL_TINY_PIC:
7138 case AARCH64_CMODEL_SMALL:
7139 case AARCH64_CMODEL_SMALL_PIC:
7140 /* text+got+data < 4GB.  4-byte signed relocs are sufficient
7141 for everything. */
7142 type = DW_EH_PE_sdata4;
7143 break;
7144 default:
7145 /* No assumptions here. 8-byte relocs required. */
7146 type = DW_EH_PE_sdata8;
7147 break;
7148 }
7149 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
7150 }
7151
7152 /* Emit load exclusive. */
7153
7154 static void
7155 aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval,
7156 rtx mem, rtx model_rtx)
7157 {
7158 rtx (*gen) (rtx, rtx, rtx);
7159
7160 switch (mode)
7161 {
7162 case QImode: gen = gen_aarch64_load_exclusiveqi; break;
7163 case HImode: gen = gen_aarch64_load_exclusivehi; break;
7164 case SImode: gen = gen_aarch64_load_exclusivesi; break;
7165 case DImode: gen = gen_aarch64_load_exclusivedi; break;
7166 default:
7167 gcc_unreachable ();
7168 }
7169
7170 emit_insn (gen (rval, mem, model_rtx));
7171 }
7172
7173 /* Emit store exclusive. */
7174
7175 static void
7176 aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval,
7177 rtx rval, rtx mem, rtx model_rtx)
7178 {
7179 rtx (*gen) (rtx, rtx, rtx, rtx);
7180
7181 switch (mode)
7182 {
7183 case QImode: gen = gen_aarch64_store_exclusiveqi; break;
7184 case HImode: gen = gen_aarch64_store_exclusivehi; break;
7185 case SImode: gen = gen_aarch64_store_exclusivesi; break;
7186 case DImode: gen = gen_aarch64_store_exclusivedi; break;
7187 default:
7188 gcc_unreachable ();
7189 }
7190
7191 emit_insn (gen (bval, rval, mem, model_rtx));
7192 }
7193
7194 /* Emit the jump instruction INSN and mark it as very unlikely to be taken.  */
7195
7196 static void
7197 aarch64_emit_unlikely_jump (rtx insn)
7198 {
7199 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
7200
7201 insn = emit_jump_insn (insn);
7202 add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
7203 }
7204
7205 /* Expand a compare and swap pattern. */
7206
7207 void
7208 aarch64_expand_compare_and_swap (rtx operands[])
7209 {
7210 rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
7211 enum machine_mode mode, cmp_mode;
7212 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
7213
7214 bval = operands[0];
7215 rval = operands[1];
7216 mem = operands[2];
7217 oldval = operands[3];
7218 newval = operands[4];
7219 is_weak = operands[5];
7220 mod_s = operands[6];
7221 mod_f = operands[7];
7222 mode = GET_MODE (mem);
7223 cmp_mode = mode;
7224
7225 /* Normally the succ memory model must be stronger than fail, but in the
7226 unlikely event of fail being ACQUIRE and succ being RELEASE we need to
7227 promote succ to ACQ_REL so that we don't lose the acquire semantics. */
7228
7229 if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
7230 && INTVAL (mod_s) == MEMMODEL_RELEASE)
7231 mod_s = GEN_INT (MEMMODEL_ACQ_REL);
7232
7233 switch (mode)
7234 {
7235 case QImode:
7236 case HImode:
7237 /* For short modes, we're going to perform the comparison in SImode,
7238 so do the zero-extension now. */
7239 cmp_mode = SImode;
7240 rval = gen_reg_rtx (SImode);
7241 oldval = convert_modes (SImode, mode, oldval, true);
7242 /* Fall through. */
7243
7244 case SImode:
7245 case DImode:
7246 /* Force the value into a register if needed. */
7247 if (!aarch64_plus_operand (oldval, mode))
7248 oldval = force_reg (cmp_mode, oldval);
7249 break;
7250
7251 default:
7252 gcc_unreachable ();
7253 }
7254
7255 switch (mode)
7256 {
7257 case QImode: gen = gen_atomic_compare_and_swapqi_1; break;
7258 case HImode: gen = gen_atomic_compare_and_swaphi_1; break;
7259 case SImode: gen = gen_atomic_compare_and_swapsi_1; break;
7260 case DImode: gen = gen_atomic_compare_and_swapdi_1; break;
7261 default:
7262 gcc_unreachable ();
7263 }
7264
7265 emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
7266
7267 if (mode == QImode || mode == HImode)
7268 emit_move_insn (operands[1], gen_lowpart (mode, rval));
7269
7270 x = gen_rtx_REG (CCmode, CC_REGNUM);
7271 x = gen_rtx_EQ (SImode, x, const0_rtx);
7272 emit_insn (gen_rtx_SET (VOIDmode, bval, x));
7273 }
7274
7275 /* Split a compare and swap pattern. */
7276
7277 void
7278 aarch64_split_compare_and_swap (rtx operands[])
7279 {
7280 rtx rval, mem, oldval, newval, scratch;
7281 enum machine_mode mode;
7282 bool is_weak;
7283 rtx label1, label2, x, cond;
7284
7285 rval = operands[0];
7286 mem = operands[1];
7287 oldval = operands[2];
7288 newval = operands[3];
7289 is_weak = (operands[4] != const0_rtx);
7290 scratch = operands[7];
7291 mode = GET_MODE (mem);
7292
7293 label1 = NULL_RTX;
7294 if (!is_weak)
7295 {
7296 label1 = gen_label_rtx ();
7297 emit_label (label1);
7298 }
7299 label2 = gen_label_rtx ();
7300
7301 aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
7302
7303 cond = aarch64_gen_compare_reg (NE, rval, oldval);
7304 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
7305 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
7306 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
7307 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
7308
7309 aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
7310
7311 if (!is_weak)
7312 {
7313 x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
7314 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
7315 gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
7316 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
7317 }
7318 else
7319 {
7320 cond = gen_rtx_REG (CCmode, CC_REGNUM);
7321 x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
7322 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
7323 }
7324
7325 emit_label (label2);
7326 }
7327
7328 /* Split an atomic operation. */
7329
7330 void
7331 aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
7332 rtx value, rtx model_rtx, rtx cond)
7333 {
7334 enum machine_mode mode = GET_MODE (mem);
7335 enum machine_mode wmode = (mode == DImode ? DImode : SImode);
7336 rtx label, x;
7337
7338 label = gen_label_rtx ();
7339 emit_label (label);
7340
7341 if (new_out)
7342 new_out = gen_lowpart (wmode, new_out);
7343 if (old_out)
7344 old_out = gen_lowpart (wmode, old_out);
7345 else
7346 old_out = new_out;
7347 value = simplify_gen_subreg (wmode, value, mode, 0);
7348
7349 aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
7350
7351 switch (code)
7352 {
7353 case SET:
7354 new_out = value;
7355 break;
7356
7357 case NOT:
7358 x = gen_rtx_AND (wmode, old_out, value);
7359 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
7360 x = gen_rtx_NOT (wmode, new_out);
7361 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
7362 break;
7363
7364 case MINUS:
7365 if (CONST_INT_P (value))
7366 {
7367 value = GEN_INT (-INTVAL (value));
7368 code = PLUS;
7369 }
7370 /* Fall through. */
7371
7372 default:
7373 x = gen_rtx_fmt_ee (code, wmode, old_out, value);
7374 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
7375 break;
7376 }
7377
7378 aarch64_emit_store_exclusive (mode, cond, mem,
7379 gen_lowpart (mode, new_out), model_rtx);
7380
7381 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
7382 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
7383 gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
7384 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
7385 }
7386
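/* Print to the assembler file the "+<extension>" suffixes (for example
   "+fp+simd") for every architecture extension enabled in
   aarch64_isa_flags, followed by a newline.  */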
7387 static void
7388 aarch64_print_extension (void)
7389 {
7390 const struct aarch64_option_extension *opt = NULL;
7391
7392 for (opt = all_extensions; opt->name != NULL; opt++)
7393 if ((aarch64_isa_flags & opt->flags_on) == opt->flags_on)
7394 asm_fprintf (asm_out_file, "+%s", opt->name);
7395
7396 asm_fprintf (asm_out_file, "\n");
7397 }
7398
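/* Emit the initial ".arch" or ".cpu" directive (including any extension
   suffixes) describing the selected architecture or CPU, then emit the
   default file prologue.  */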
7399 static void
7400 aarch64_start_file (void)
7401 {
7402 if (selected_arch)
7403 {
7404 asm_fprintf (asm_out_file, "\t.arch %s", selected_arch->name);
7405 aarch64_print_extension ();
7406 }
7407 else if (selected_cpu)
7408 {
7409 asm_fprintf (asm_out_file, "\t.cpu %s", selected_cpu->name);
7410 aarch64_print_extension ();
7411 }
7412 default_file_start ();
7413 }
7414
7415 /* Target hook for c_mode_for_suffix. */
7416 static enum machine_mode
7417 aarch64_c_mode_for_suffix (char suffix)
7418 {
7419 if (suffix == 'q')
7420 return TFmode;
7421
7422 return VOIDmode;
7423 }
7424
7425 /* We can only represent floating point constants which will fit in
7426 "quarter-precision" values.  These values are characterised by
7427 a sign bit, a 4-bit mantissa and a 3-bit exponent, and are given
7428 by:
7429
7430 (-1)^s * (n/16) * 2^r
7431
7432 Where:
7433 's' is the sign bit.
7434 'n' is an integer in the range 16 <= n <= 31.
7435 'r' is an integer in the range -3 <= r <= 4. */
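/* For example, 0.25 = (16/16) * 2^-2 (s = 0, n = 16, r = -2) and
   31.0 = (31/16) * 2^4 are representable, whereas 0.1 cannot be written
   as (n/16) * 2^r with n and r in range and is therefore rejected.  */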
7436
7437 /* Return true iff X can be represented as a quarter-precision
7438 floating point immediate operand.  Note, we cannot represent 0.0.  */
7439 bool
7440 aarch64_float_const_representable_p (rtx x)
7441 {
7442 /* This represents our current view of how many bits
7443 make up the mantissa. */
7444 int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
7445 int exponent;
7446 unsigned HOST_WIDE_INT mantissa, mask;
7447 REAL_VALUE_TYPE r, m;
7448 bool fail;
7449
7450 if (!CONST_DOUBLE_P (x))
7451 return false;
7452
7453 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7454
7455 /* We cannot represent infinities, NaNs or +/-zero. We won't
7456 know if we have +zero until we analyse the mantissa, but we
7457 can reject the other invalid values. */
7458 if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r)
7459 || REAL_VALUE_MINUS_ZERO (r))
7460 return false;
7461
7462 /* Extract exponent. */
7463 r = real_value_abs (&r);
7464 exponent = REAL_EXP (&r);
7465
7466 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
7467 highest (sign) bit, with a fixed binary point at bit point_pos.
7468 m1 holds the low part of the mantissa, m2 the high part.
7469 WARNING: If we ever have a representation using more than 2 * H_W_I - 1
7470 bits for the mantissa, this can fail (low bits will be lost). */
7471 real_ldexp (&m, &r, point_pos - exponent);
7472 wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
7473
7474 /* If the low part of the mantissa has bits set we cannot represent
7475 the value. */
7476 if (w.elt (0) != 0)
7477 return false;
7478 /* We have rejected the lower HOST_WIDE_INT, so update our
7479 understanding of how many bits lie in the mantissa and
7480 look only at the high HOST_WIDE_INT. */
7481 mantissa = w.elt (1);
7482 point_pos -= HOST_BITS_PER_WIDE_INT;
7483
7484 /* We can only represent values with a mantissa of the form 1.xxxx. */
7485 mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
7486 if ((mantissa & mask) != 0)
7487 return false;
7488
7489 /* Having filtered unrepresentable values, we may now remove all
7490 but the highest 5 bits. */
7491 mantissa >>= point_pos - 5;
7492
7493 /* We cannot represent the value 0.0, so reject it. This is handled
7494 elsewhere. */
7495 if (mantissa == 0)
7496 return false;
7497
7498 /* Then, as bit 4 is always set, we can mask it off, leaving
7499 the mantissa in the range [0, 15]. */
7500 mantissa &= ~(1 << 4);
7501 gcc_assert (mantissa <= 15);
7502
7503 /* GCC internally does not use IEEE754-like encoding (where normalized
7504 significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
7505 Our mantissa values are shifted 4 places to the left relative to
7506 normalized IEEE754 so we must modify the exponent returned by REAL_EXP
7507 by 5 places to correct for GCC's representation. */
7508 exponent = 5 - exponent;
7509
7510 return (exponent >= 0 && exponent <= 7);
7511 }
7512
7513 char*
7514 aarch64_output_simd_mov_immediate (rtx const_vector,
7515 enum machine_mode mode,
7516 unsigned width)
7517 {
7518 bool is_valid;
7519 static char templ[40];
7520 const char *mnemonic;
7521 const char *shift_op;
7522 unsigned int lane_count = 0;
7523 char element_char;
7524
7525 struct simd_immediate_info info = { NULL_RTX, 0, 0, false, false };
7526
7527 /* This will return true to show CONST_VECTOR is legal for use as either
7528 an AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It will
7529 also update INFO to show how the immediate should be generated.  */
7530 is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info);
7531 gcc_assert (is_valid);
7532
7533 element_char = sizetochar (info.element_width);
7534 lane_count = width / info.element_width;
7535
7536 mode = GET_MODE_INNER (mode);
7537 if (mode == SFmode || mode == DFmode)
7538 {
7539 gcc_assert (info.shift == 0 && ! info.mvn);
7540 if (aarch64_float_const_zero_rtx_p (info.value))
7541 info.value = GEN_INT (0);
7542 else
7543 {
7544 #define buf_size 20
7545 REAL_VALUE_TYPE r;
7546 REAL_VALUE_FROM_CONST_DOUBLE (r, info.value);
7547 char float_buf[buf_size] = {'\0'};
7548 real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode);
7549 #undef buf_size
7550
7551 if (lane_count == 1)
7552 snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
7553 else
7554 snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
7555 lane_count, element_char, float_buf);
7556 return templ;
7557 }
7558 }
7559
7560 mnemonic = info.mvn ? "mvni" : "movi";
7561 shift_op = info.msl ? "msl" : "lsl";
7562
7563 if (lane_count == 1)
7564 snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
7565 mnemonic, UINTVAL (info.value));
7566 else if (info.shift)
7567 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
7568 ", %s %d", mnemonic, lane_count, element_char,
7569 UINTVAL (info.value), shift_op, info.shift);
7570 else
7571 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
7572 mnemonic, lane_count, element_char, UINTVAL (info.value));
7573 return templ;
7574 }
7575
7576 char*
7577 aarch64_output_scalar_simd_mov_immediate (rtx immediate,
7578 enum machine_mode mode)
7579 {
7580 enum machine_mode vmode;
7581
7582 gcc_assert (!VECTOR_MODE_P (mode));
7583 vmode = aarch64_simd_container_mode (mode, 64);
7584 rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
7585 return aarch64_output_simd_mov_immediate (v_op, vmode, 64);
7586 }
7587
7588 /* Split operands into moves from op[1] + op[2] into op[0]. */
7589
7590 void
7591 aarch64_split_combinev16qi (rtx operands[3])
7592 {
7593 unsigned int dest = REGNO (operands[0]);
7594 unsigned int src1 = REGNO (operands[1]);
7595 unsigned int src2 = REGNO (operands[2]);
7596 enum machine_mode halfmode = GET_MODE (operands[1]);
7597 unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode);
7598 rtx destlo, desthi;
7599
7600 gcc_assert (halfmode == V16QImode);
7601
7602 if (src1 == dest && src2 == dest + halfregs)
7603 {
7604 /* No-op move. Can't split to nothing; emit something. */
7605 emit_note (NOTE_INSN_DELETED);
7606 return;
7607 }
7608
7609 /* Preserve register attributes for variable tracking. */
7610 destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
7611 desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
7612 GET_MODE_SIZE (halfmode));
7613
7614 /* Special case of reversed high/low parts. */
7615 if (reg_overlap_mentioned_p (operands[2], destlo)
7616 && reg_overlap_mentioned_p (operands[1], desthi))
7617 {
7618 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
7619 emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2]));
7620 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
7621 }
7622 else if (!reg_overlap_mentioned_p (operands[2], destlo))
7623 {
7624 /* Try to avoid unnecessary moves if part of the result
7625 is in the right place already. */
7626 if (src1 != dest)
7627 emit_move_insn (destlo, operands[1]);
7628 if (src2 != dest + halfregs)
7629 emit_move_insn (desthi, operands[2]);
7630 }
7631 else
7632 {
7633 if (src2 != dest + halfregs)
7634 emit_move_insn (desthi, operands[2]);
7635 if (src1 != dest)
7636 emit_move_insn (destlo, operands[1]);
7637 }
7638 }
7639
7640 /* vec_perm support. */
7641
7642 #define MAX_VECT_LEN 16
7643
7644 struct expand_vec_perm_d
7645 {
7646 rtx target, op0, op1;
7647 unsigned char perm[MAX_VECT_LEN];
7648 enum machine_mode vmode;
7649 unsigned char nelt;
7650 bool one_vector_p;
7651 bool testing_p;
7652 };
7653
7654 /* Generate a variable permutation. */
7655
7656 static void
7657 aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
7658 {
7659 enum machine_mode vmode = GET_MODE (target);
7660 bool one_vector_p = rtx_equal_p (op0, op1);
7661
7662 gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
7663 gcc_checking_assert (GET_MODE (op0) == vmode);
7664 gcc_checking_assert (GET_MODE (op1) == vmode);
7665 gcc_checking_assert (GET_MODE (sel) == vmode);
7666 gcc_checking_assert (TARGET_SIMD);
7667
7668 if (one_vector_p)
7669 {
7670 if (vmode == V8QImode)
7671 {
7672 /* Expand the argument to a V16QI mode by duplicating it. */
7673 rtx pair = gen_reg_rtx (V16QImode);
7674 emit_insn (gen_aarch64_combinev8qi (pair, op0, op0));
7675 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
7676 }
7677 else
7678 {
7679 emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel));
7680 }
7681 }
7682 else
7683 {
7684 rtx pair;
7685
7686 if (vmode == V8QImode)
7687 {
7688 pair = gen_reg_rtx (V16QImode);
7689 emit_insn (gen_aarch64_combinev8qi (pair, op0, op1));
7690 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
7691 }
7692 else
7693 {
7694 pair = gen_reg_rtx (OImode);
7695 emit_insn (gen_aarch64_combinev16qi (pair, op0, op1));
7696 emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel));
7697 }
7698 }
7699 }
7700
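/* Expand a variable permutation of OP0/OP1 into TARGET using the byte
   selector SEL, masking the selector so that out-of-range indices wrap
   around as vec_perm requires, before expanding to a TBL-based permute.  */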
7701 void
7702 aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
7703 {
7704 enum machine_mode vmode = GET_MODE (target);
7705 unsigned int i, nelt = GET_MODE_NUNITS (vmode);
7706 bool one_vector_p = rtx_equal_p (op0, op1);
7707 rtx rmask[MAX_VECT_LEN], mask;
7708
7709 gcc_checking_assert (!BYTES_BIG_ENDIAN);
7710
7711 /* The TBL instruction does not use a modulo index, so we must take care
7712 of that ourselves. */
7713 mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1);
7714 for (i = 0; i < nelt; ++i)
7715 rmask[i] = mask;
7716 mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask));
7717 sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
7718
7719 aarch64_expand_vec_perm_1 (target, op0, op1, sel);
7720 }
7721
7722 /* Recognize patterns suitable for the TRN instructions. */
7723 static bool
7724 aarch64_evpc_trn (struct expand_vec_perm_d *d)
7725 {
7726 unsigned int i, odd, mask, nelt = d->nelt;
7727 rtx out, in0, in1, x;
7728 rtx (*gen) (rtx, rtx, rtx);
7729 enum machine_mode vmode = d->vmode;
7730
7731 if (GET_MODE_UNIT_SIZE (vmode) > 8)
7732 return false;
7733
7734 /* Note that these are little-endian tests.
7735 We correct for big-endian later. */
7736 if (d->perm[0] == 0)
7737 odd = 0;
7738 else if (d->perm[0] == 1)
7739 odd = 1;
7740 else
7741 return false;
7742 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
7743
7744 for (i = 0; i < nelt; i += 2)
7745 {
7746 if (d->perm[i] != i + odd)
7747 return false;
7748 if (d->perm[i + 1] != ((i + nelt + odd) & mask))
7749 return false;
7750 }
7751
7752 /* Success! */
7753 if (d->testing_p)
7754 return true;
7755
7756 in0 = d->op0;
7757 in1 = d->op1;
7758 if (BYTES_BIG_ENDIAN)
7759 {
7760 x = in0, in0 = in1, in1 = x;
7761 odd = !odd;
7762 }
7763 out = d->target;
7764
7765 if (odd)
7766 {
7767 switch (vmode)
7768 {
7769 case V16QImode: gen = gen_aarch64_trn2v16qi; break;
7770 case V8QImode: gen = gen_aarch64_trn2v8qi; break;
7771 case V8HImode: gen = gen_aarch64_trn2v8hi; break;
7772 case V4HImode: gen = gen_aarch64_trn2v4hi; break;
7773 case V4SImode: gen = gen_aarch64_trn2v4si; break;
7774 case V2SImode: gen = gen_aarch64_trn2v2si; break;
7775 case V2DImode: gen = gen_aarch64_trn2v2di; break;
7776 case V4SFmode: gen = gen_aarch64_trn2v4sf; break;
7777 case V2SFmode: gen = gen_aarch64_trn2v2sf; break;
7778 case V2DFmode: gen = gen_aarch64_trn2v2df; break;
7779 default:
7780 return false;
7781 }
7782 }
7783 else
7784 {
7785 switch (vmode)
7786 {
7787 case V16QImode: gen = gen_aarch64_trn1v16qi; break;
7788 case V8QImode: gen = gen_aarch64_trn1v8qi; break;
7789 case V8HImode: gen = gen_aarch64_trn1v8hi; break;
7790 case V4HImode: gen = gen_aarch64_trn1v4hi; break;
7791 case V4SImode: gen = gen_aarch64_trn1v4si; break;
7792 case V2SImode: gen = gen_aarch64_trn1v2si; break;
7793 case V2DImode: gen = gen_aarch64_trn1v2di; break;
7794 case V4SFmode: gen = gen_aarch64_trn1v4sf; break;
7795 case V2SFmode: gen = gen_aarch64_trn1v2sf; break;
7796 case V2DFmode: gen = gen_aarch64_trn1v2df; break;
7797 default:
7798 return false;
7799 }
7800 }
7801
7802 emit_insn (gen (out, in0, in1));
7803 return true;
7804 }
7805
7806 /* Recognize patterns suitable for the UZP instructions. */
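/* For example, for V4SImode the selector {0, 2, 4, 6} maps to UZP1 and
{1, 3, 5, 7} maps to UZP2 (little-endian element numbering). */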
7807 static bool
7808 aarch64_evpc_uzp (struct expand_vec_perm_d *d)
7809 {
7810 unsigned int i, odd, mask, nelt = d->nelt;
7811 rtx out, in0, in1, x;
7812 rtx (*gen) (rtx, rtx, rtx);
7813 enum machine_mode vmode = d->vmode;
7814
7815 if (GET_MODE_UNIT_SIZE (vmode) > 8)
7816 return false;
7817
7818 /* Note that these are little-endian tests.
7819 We correct for big-endian later. */
7820 if (d->perm[0] == 0)
7821 odd = 0;
7822 else if (d->perm[0] == 1)
7823 odd = 1;
7824 else
7825 return false;
7826 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
7827
7828 for (i = 0; i < nelt; i++)
7829 {
7830 unsigned elt = (i * 2 + odd) & mask;
7831 if (d->perm[i] != elt)
7832 return false;
7833 }
7834
7835 /* Success! */
7836 if (d->testing_p)
7837 return true;
7838
7839 in0 = d->op0;
7840 in1 = d->op1;
7841 if (BYTES_BIG_ENDIAN)
7842 {
7843 x = in0, in0 = in1, in1 = x;
7844 odd = !odd;
7845 }
7846 out = d->target;
7847
7848 if (odd)
7849 {
7850 switch (vmode)
7851 {
7852 case V16QImode: gen = gen_aarch64_uzp2v16qi; break;
7853 case V8QImode: gen = gen_aarch64_uzp2v8qi; break;
7854 case V8HImode: gen = gen_aarch64_uzp2v8hi; break;
7855 case V4HImode: gen = gen_aarch64_uzp2v4hi; break;
7856 case V4SImode: gen = gen_aarch64_uzp2v4si; break;
7857 case V2SImode: gen = gen_aarch64_uzp2v2si; break;
7858 case V2DImode: gen = gen_aarch64_uzp2v2di; break;
7859 case V4SFmode: gen = gen_aarch64_uzp2v4sf; break;
7860 case V2SFmode: gen = gen_aarch64_uzp2v2sf; break;
7861 case V2DFmode: gen = gen_aarch64_uzp2v2df; break;
7862 default:
7863 return false;
7864 }
7865 }
7866 else
7867 {
7868 switch (vmode)
7869 {
7870 case V16QImode: gen = gen_aarch64_uzp1v16qi; break;
7871 case V8QImode: gen = gen_aarch64_uzp1v8qi; break;
7872 case V8HImode: gen = gen_aarch64_uzp1v8hi; break;
7873 case V4HImode: gen = gen_aarch64_uzp1v4hi; break;
7874 case V4SImode: gen = gen_aarch64_uzp1v4si; break;
7875 case V2SImode: gen = gen_aarch64_uzp1v2si; break;
7876 case V2DImode: gen = gen_aarch64_uzp1v2di; break;
7877 case V4SFmode: gen = gen_aarch64_uzp1v4sf; break;
7878 case V2SFmode: gen = gen_aarch64_uzp1v2sf; break;
7879 case V2DFmode: gen = gen_aarch64_uzp1v2df; break;
7880 default:
7881 return false;
7882 }
7883 }
7884
7885 emit_insn (gen (out, in0, in1));
7886 return true;
7887 }
7888
7889 /* Recognize patterns suitable for the ZIP instructions. */
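/* For example, for V4SImode the selector {0, 4, 1, 5} maps to ZIP1 and
{2, 6, 3, 7} maps to ZIP2 (little-endian element numbering). */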
7890 static bool
7891 aarch64_evpc_zip (struct expand_vec_perm_d *d)
7892 {
7893 unsigned int i, high, mask, nelt = d->nelt;
7894 rtx out, in0, in1, x;
7895 rtx (*gen) (rtx, rtx, rtx);
7896 enum machine_mode vmode = d->vmode;
7897
7898 if (GET_MODE_UNIT_SIZE (vmode) > 8)
7899 return false;
7900
7901 /* Note that these are little-endian tests.
7902 We correct for big-endian later. */
7903 high = nelt / 2;
7904 if (d->perm[0] == high)
7905 /* Do nothing. */
7906 ;
7907 else if (d->perm[0] == 0)
7908 high = 0;
7909 else
7910 return false;
7911 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
7912
7913 for (i = 0; i < nelt / 2; i++)
7914 {
7915 unsigned elt = (i + high) & mask;
7916 if (d->perm[i * 2] != elt)
7917 return false;
7918 elt = (elt + nelt) & mask;
7919 if (d->perm[i * 2 + 1] != elt)
7920 return false;
7921 }
7922
7923 /* Success! */
7924 if (d->testing_p)
7925 return true;
7926
7927 in0 = d->op0;
7928 in1 = d->op1;
7929 if (BYTES_BIG_ENDIAN)
7930 {
7931 x = in0, in0 = in1, in1 = x;
7932 high = !high;
7933 }
7934 out = d->target;
7935
7936 if (high)
7937 {
7938 switch (vmode)
7939 {
7940 case V16QImode: gen = gen_aarch64_zip2v16qi; break;
7941 case V8QImode: gen = gen_aarch64_zip2v8qi; break;
7942 case V8HImode: gen = gen_aarch64_zip2v8hi; break;
7943 case V4HImode: gen = gen_aarch64_zip2v4hi; break;
7944 case V4SImode: gen = gen_aarch64_zip2v4si; break;
7945 case V2SImode: gen = gen_aarch64_zip2v2si; break;
7946 case V2DImode: gen = gen_aarch64_zip2v2di; break;
7947 case V4SFmode: gen = gen_aarch64_zip2v4sf; break;
7948 case V2SFmode: gen = gen_aarch64_zip2v2sf; break;
7949 case V2DFmode: gen = gen_aarch64_zip2v2df; break;
7950 default:
7951 return false;
7952 }
7953 }
7954 else
7955 {
7956 switch (vmode)
7957 {
7958 case V16QImode: gen = gen_aarch64_zip1v16qi; break;
7959 case V8QImode: gen = gen_aarch64_zip1v8qi; break;
7960 case V8HImode: gen = gen_aarch64_zip1v8hi; break;
7961 case V4HImode: gen = gen_aarch64_zip1v4hi; break;
7962 case V4SImode: gen = gen_aarch64_zip1v4si; break;
7963 case V2SImode: gen = gen_aarch64_zip1v2si; break;
7964 case V2DImode: gen = gen_aarch64_zip1v2di; break;
7965 case V4SFmode: gen = gen_aarch64_zip1v4sf; break;
7966 case V2SFmode: gen = gen_aarch64_zip1v2sf; break;
7967 case V2DFmode: gen = gen_aarch64_zip1v2df; break;
7968 default:
7969 return false;
7970 }
7971 }
7972
7973 emit_insn (gen (out, in0, in1));
7974 return true;
7975 }
7976
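/* Recognize permutations that broadcast a single element, e.g.
{2, 2, 2, 2} for V4SImode, and implement them with a DUP (by lane)
instruction. */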
7977 static bool
7978 aarch64_evpc_dup (struct expand_vec_perm_d *d)
7979 {
7980 rtx (*gen) (rtx, rtx, rtx);
7981 rtx out = d->target;
7982 rtx in0;
7983 enum machine_mode vmode = d->vmode;
7984 unsigned int i, elt, nelt = d->nelt;
7985 rtx lane;
7986
7987 /* TODO: This may not be big-endian safe. */
7988 if (BYTES_BIG_ENDIAN)
7989 return false;
7990
7991 elt = d->perm[0];
7992 for (i = 1; i < nelt; i++)
7993 {
7994 if (elt != d->perm[i])
7995 return false;
7996 }
7997
7998 /* The generic preparation in aarch64_expand_vec_perm_const_1
7999 swaps the operand order and the permute indices if it finds
8000 d->perm[0] to be in the second operand. Thus, we can always
8001 use d->op0 and need not do any extra arithmetic to get the
8002 correct lane number. */
8003 in0 = d->op0;
8004 lane = GEN_INT (elt);
8005
8006 switch (vmode)
8007 {
8008 case V16QImode: gen = gen_aarch64_dup_lanev16qi; break;
8009 case V8QImode: gen = gen_aarch64_dup_lanev8qi; break;
8010 case V8HImode: gen = gen_aarch64_dup_lanev8hi; break;
8011 case V4HImode: gen = gen_aarch64_dup_lanev4hi; break;
8012 case V4SImode: gen = gen_aarch64_dup_lanev4si; break;
8013 case V2SImode: gen = gen_aarch64_dup_lanev2si; break;
8014 case V2DImode: gen = gen_aarch64_dup_lanev2di; break;
8015 case V4SFmode: gen = gen_aarch64_dup_lanev4sf; break;
8016 case V2SFmode: gen = gen_aarch64_dup_lanev2sf; break;
8017 case V2DFmode: gen = gen_aarch64_dup_lanev2df; break;
8018 default:
8019 return false;
8020 }
8021
8022 emit_insn (gen (out, in0, lane));
8023 return true;
8024 }
8025
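/* Catch-all: materialize D's selector as a constant vector in a register
and reuse the TBL-based variable permute above.  Only V8QImode and
V16QImode are handled here; generic code retries other modes with the
elements lowered to QImode. */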
8026 static bool
8027 aarch64_evpc_tbl (struct expand_vec_perm_d *d)
8028 {
8029 rtx rperm[MAX_VECT_LEN], sel;
8030 enum machine_mode vmode = d->vmode;
8031 unsigned int i, nelt = d->nelt;
8032
8033 /* TODO: ARM's TBL indexing is little-endian. In order to handle GCC's
8034 numbering of elements for big-endian, we must reverse the order. */
8035 if (BYTES_BIG_ENDIAN)
8036 return false;
8037
8038 if (d->testing_p)
8039 return true;
8040
8041 /* Generic code will try constant permutation twice: once with the
8042 original mode and again with the elements lowered to QImode.
8043 So wait and don't do the selector expansion ourselves. */
8044 if (vmode != V8QImode && vmode != V16QImode)
8045 return false;
8046
8047 for (i = 0; i < nelt; ++i)
8048 rperm[i] = GEN_INT (d->perm[i]);
8049 sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
8050 sel = force_reg (vmode, sel);
8051
8052 aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
8053 return true;
8054 }
8055
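/* Try to expand the constant permutation described by D, first with the
single-instruction ZIP/UZP/TRN/DUP patterns and finally via TBL.  Return
true on success.  D->testing_p indicates that the caller only needs to
know whether expansion is possible. */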
8056 static bool
8057 aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
8058 {
8059 /* The pattern matching functions above are written to look for a small
8060 number to begin the sequence (0, 1, N/2). If we begin with an index
8061 from the second operand, we can swap the operands. */
8062 if (d->perm[0] >= d->nelt)
8063 {
8064 unsigned i, nelt = d->nelt;
8065 rtx x;
8066
8067 for (i = 0; i < nelt; ++i)
8068 d->perm[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
8069
8070 x = d->op0;
8071 d->op0 = d->op1;
8072 d->op1 = x;
8073 }
8074
8075 if (TARGET_SIMD)
8076 {
8077 if (aarch64_evpc_zip (d))
8078 return true;
8079 else if (aarch64_evpc_uzp (d))
8080 return true;
8081 else if (aarch64_evpc_trn (d))
8082 return true;
8083 else if (aarch64_evpc_dup (d))
8084 return true;
8085 return aarch64_evpc_tbl (d);
8086 }
8087 return false;
8088 }
8089
8090 /* Expand a vec_perm_const pattern. */
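/* A sketch of how this expander is typically reached from user code,
assuming a GNU vector type such as
  typedef int v4si __attribute__ ((vector_size (16)));
a shuffle with a constant mask, e.g.
  v4si r = __builtin_shuffle (x, y, (v4si) {0, 4, 1, 5});
arrives here with SEL as a CONST_VECTOR and is matched as ZIP1 by
aarch64_evpc_zip. */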
8091
8092 bool
8093 aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
8094 {
8095 struct expand_vec_perm_d d;
8096 int i, nelt, which;
8097
8098 d.target = target;
8099 d.op0 = op0;
8100 d.op1 = op1;
8101
8102 d.vmode = GET_MODE (target);
8103 gcc_assert (VECTOR_MODE_P (d.vmode));
8104 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
8105 d.testing_p = false;
8106
8107 for (i = which = 0; i < nelt; ++i)
8108 {
8109 rtx e = XVECEXP (sel, 0, i);
8110 int ei = INTVAL (e) & (2 * nelt - 1);
8111 which |= (ei < nelt ? 1 : 2);
8112 d.perm[i] = ei;
8113 }
8114
8115 switch (which)
8116 {
8117 default:
8118 gcc_unreachable ();
8119
8120 case 3:
8121 d.one_vector_p = false;
8122 if (!rtx_equal_p (op0, op1))
8123 break;
8124
8125 /* The elements of PERM do not suggest that only the first operand
8126 is used, but both operands are identical. Allow easier matching
8127 of the permutation by folding the permutation into the single
8128 input vector. */
8129 /* Fall through. */
8130 case 2:
8131 for (i = 0; i < nelt; ++i)
8132 d.perm[i] &= nelt - 1;
8133 d.op0 = op1;
8134 d.one_vector_p = true;
8135 break;
8136
8137 case 1:
8138 d.op1 = op0;
8139 d.one_vector_p = true;
8140 break;
8141 }
8142
8143 return aarch64_expand_vec_perm_const_1 (&d);
8144 }
8145
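/* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK.  Check whether the constant
permutation SEL is supported for VMODE.  The trial expansion is wrapped in
start_sequence/end_sequence so that any insns it emits are discarded. */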
8146 static bool
8147 aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
8148 const unsigned char *sel)
8149 {
8150 struct expand_vec_perm_d d;
8151 unsigned int i, nelt, which;
8152 bool ret;
8153
8154 d.vmode = vmode;
8155 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
8156 d.testing_p = true;
8157 memcpy (d.perm, sel, nelt);
8158
8159 /* Calculate whether all elements are in one vector. */
8160 for (i = which = 0; i < nelt; ++i)
8161 {
8162 unsigned char e = d.perm[i];
8163 gcc_assert (e < 2 * nelt);
8164 which |= (e < nelt ? 1 : 2);
8165 }
8166
8167 /* If all elements are from the second vector, reindex as if from the
8168 first vector. */
8169 if (which == 2)
8170 for (i = 0; i < nelt; ++i)
8171 d.perm[i] -= nelt;
8172
8173 /* Check whether the mask can be applied to a single vector. */
8174 d.one_vector_p = (which != 3);
8175
8176 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
8177 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
8178 if (!d.one_vector_p)
8179 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
8180
8181 start_sequence ();
8182 ret = aarch64_expand_vec_perm_const_1 (&d);
8183 end_sequence ();
8184
8185 return ret;
8186 }
8187
8188 #undef TARGET_ADDRESS_COST
8189 #define TARGET_ADDRESS_COST aarch64_address_cost
8190
8191 /* This hook determines whether unnamed bitfields affect the alignment
8192 of the containing structure. The hook returns true if the structure
8193 should inherit the alignment requirements of an unnamed bitfield's
8194 type. */
8195 #undef TARGET_ALIGN_ANON_BITFIELD
8196 #define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
8197
8198 #undef TARGET_ASM_ALIGNED_DI_OP
8199 #define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
8200
8201 #undef TARGET_ASM_ALIGNED_HI_OP
8202 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
8203
8204 #undef TARGET_ASM_ALIGNED_SI_OP
8205 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
8206
8207 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
8208 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
8209 hook_bool_const_tree_hwi_hwi_const_tree_true
8210
8211 #undef TARGET_ASM_FILE_START
8212 #define TARGET_ASM_FILE_START aarch64_start_file
8213
8214 #undef TARGET_ASM_OUTPUT_MI_THUNK
8215 #define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
8216
8217 #undef TARGET_ASM_SELECT_RTX_SECTION
8218 #define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
8219
8220 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
8221 #define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
8222
8223 #undef TARGET_BUILD_BUILTIN_VA_LIST
8224 #define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
8225
8226 #undef TARGET_CALLEE_COPIES
8227 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
8228
8229 #undef TARGET_CAN_ELIMINATE
8230 #define TARGET_CAN_ELIMINATE aarch64_can_eliminate
8231
8232 #undef TARGET_CANNOT_FORCE_CONST_MEM
8233 #define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
8234
8235 #undef TARGET_CONDITIONAL_REGISTER_USAGE
8236 #define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
8237
8238 /* Only the least significant bit is used for initialization guard
8239 variables. */
8240 #undef TARGET_CXX_GUARD_MASK_BIT
8241 #define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
8242
8243 #undef TARGET_C_MODE_FOR_SUFFIX
8244 #define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
8245
8246 #ifdef TARGET_BIG_ENDIAN_DEFAULT
8247 #undef TARGET_DEFAULT_TARGET_FLAGS
8248 #define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
8249 #endif
8250
8251 #undef TARGET_CLASS_MAX_NREGS
8252 #define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
8253
8254 #undef TARGET_BUILTIN_DECL
8255 #define TARGET_BUILTIN_DECL aarch64_builtin_decl
8256
8257 #undef TARGET_EXPAND_BUILTIN
8258 #define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
8259
8260 #undef TARGET_EXPAND_BUILTIN_VA_START
8261 #define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
8262
8263 #undef TARGET_FOLD_BUILTIN
8264 #define TARGET_FOLD_BUILTIN aarch64_fold_builtin
8265
8266 #undef TARGET_FUNCTION_ARG
8267 #define TARGET_FUNCTION_ARG aarch64_function_arg
8268
8269 #undef TARGET_FUNCTION_ARG_ADVANCE
8270 #define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
8271
8272 #undef TARGET_FUNCTION_ARG_BOUNDARY
8273 #define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
8274
8275 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
8276 #define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
8277
8278 #undef TARGET_FUNCTION_VALUE
8279 #define TARGET_FUNCTION_VALUE aarch64_function_value
8280
8281 #undef TARGET_FUNCTION_VALUE_REGNO_P
8282 #define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
8283
8284 #undef TARGET_FRAME_POINTER_REQUIRED
8285 #define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
8286
8287 #undef TARGET_GIMPLE_FOLD_BUILTIN
8288 #define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
8289
8290 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
8291 #define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
8292
8293 #undef TARGET_INIT_BUILTINS
8294 #define TARGET_INIT_BUILTINS aarch64_init_builtins
8295
8296 #undef TARGET_LEGITIMATE_ADDRESS_P
8297 #define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
8298
8299 #undef TARGET_LEGITIMATE_CONSTANT_P
8300 #define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
8301
8302 #undef TARGET_LIBGCC_CMP_RETURN_MODE
8303 #define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
8304
8305 #undef TARGET_LRA_P
8306 #define TARGET_LRA_P aarch64_lra_p
8307
8308 #undef TARGET_MANGLE_TYPE
8309 #define TARGET_MANGLE_TYPE aarch64_mangle_type
8310
8311 #undef TARGET_MEMORY_MOVE_COST
8312 #define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
8313
8314 #undef TARGET_MUST_PASS_IN_STACK
8315 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
8316
8317 /* This target hook should return true if accesses to volatile bitfields
8318 should use the narrowest mode possible. It should return false if these
8319 accesses should use the bitfield container type. */
8320 #undef TARGET_NARROW_VOLATILE_BITFIELD
8321 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
8322
8323 #undef TARGET_OPTION_OVERRIDE
8324 #define TARGET_OPTION_OVERRIDE aarch64_override_options
8325
8326 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
8327 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
8328 aarch64_override_options_after_change
8329
8330 #undef TARGET_PASS_BY_REFERENCE
8331 #define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
8332
8333 #undef TARGET_PREFERRED_RELOAD_CLASS
8334 #define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
8335
8336 #undef TARGET_SECONDARY_RELOAD
8337 #define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
8338
8339 #undef TARGET_SHIFT_TRUNCATION_MASK
8340 #define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
8341
8342 #undef TARGET_SETUP_INCOMING_VARARGS
8343 #define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
8344
8345 #undef TARGET_STRUCT_VALUE_RTX
8346 #define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
8347
8348 #undef TARGET_REGISTER_MOVE_COST
8349 #define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
8350
8351 #undef TARGET_RETURN_IN_MEMORY
8352 #define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
8353
8354 #undef TARGET_RETURN_IN_MSB
8355 #define TARGET_RETURN_IN_MSB aarch64_return_in_msb
8356
8357 #undef TARGET_RTX_COSTS
8358 #define TARGET_RTX_COSTS aarch64_rtx_costs
8359
8360 #undef TARGET_TRAMPOLINE_INIT
8361 #define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
8362
8363 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
8364 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
8365
8366 #undef TARGET_VECTOR_MODE_SUPPORTED_P
8367 #define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
8368
8369 #undef TARGET_ARRAY_MODE_SUPPORTED_P
8370 #define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
8371
8372 #undef TARGET_VECTORIZE_ADD_STMT_COST
8373 #define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost
8374
8375 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
8376 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
8377 aarch64_builtin_vectorization_cost
8378
8379 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
8380 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
8381
8382 #undef TARGET_VECTORIZE_BUILTINS
8383 #define TARGET_VECTORIZE_BUILTINS
8384
8385 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
8386 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
8387 aarch64_builtin_vectorized_function
8388
8389 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
8390 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
8391 aarch64_autovectorize_vector_sizes
8392
8393 /* Section anchor support. */
8394
8395 #undef TARGET_MIN_ANCHOR_OFFSET
8396 #define TARGET_MIN_ANCHOR_OFFSET -256
8397
8398 /* Limit the maximum anchor offset to 4k-1, the largest immediate offset
8399 for a byte access; we can do much more for larger data types, but have
8400 no way to determine the size of the access. We assume accesses are aligned. */
8401 #undef TARGET_MAX_ANCHOR_OFFSET
8402 #define TARGET_MAX_ANCHOR_OFFSET 4095
8403
8404 #undef TARGET_VECTOR_ALIGNMENT
8405 #define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment
8406
8407 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
8408 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
8409 aarch64_simd_vector_alignment_reachable
8410
8411 /* vec_perm support. */
8412
8413 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
8414 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
8415 aarch64_vectorize_vec_perm_const_ok
8416
8417
8418 #undef TARGET_FIXED_CONDITION_CODE_REGS
8419 #define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
8420
8421 struct gcc_target targetm = TARGET_INITIALIZER;
8422
8423 #include "gt-aarch64.h"