gcc/config/aarch64/aarch64.c
1 /* Machine description for AArch64 architecture.
2 Copyright (C) 2009-2014 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "insn-attr.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "stor-layout.h"
31 #include "calls.h"
32 #include "varasm.h"
33 #include "regs.h"
34 #include "df.h"
35 #include "hard-reg-set.h"
36 #include "output.h"
37 #include "expr.h"
38 #include "reload.h"
39 #include "toplev.h"
40 #include "target.h"
41 #include "target-def.h"
42 #include "targhooks.h"
43 #include "ggc.h"
44 #include "function.h"
45 #include "tm_p.h"
46 #include "recog.h"
47 #include "langhooks.h"
48 #include "diagnostic-core.h"
49 #include "hash-table.h"
50 #include "vec.h"
51 #include "basic-block.h"
52 #include "tree-ssa-alias.h"
53 #include "internal-fn.h"
54 #include "gimple-fold.h"
55 #include "tree-eh.h"
56 #include "gimple-expr.h"
57 #include "is-a.h"
58 #include "gimple.h"
59 #include "gimplify.h"
60 #include "optabs.h"
61 #include "dwarf2.h"
62 #include "cfgloop.h"
63 #include "tree-vectorizer.h"
64 #include "config/arm/aarch-cost-tables.h"
65 #include "dumpfile.h"
66 #include "builtins.h"
67
68 /* Defined for convenience. */
69 #define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)
70
71 /* Classifies an address.
72
73 ADDRESS_REG_IMM
74 A simple base register plus immediate offset.
75
76 ADDRESS_REG_WB
77 A base register indexed by immediate offset with writeback.
78
79 ADDRESS_REG_REG
80 A base register indexed by (optionally scaled) register.
81
82 ADDRESS_REG_UXTW
83 A base register indexed by (optionally scaled) zero-extended register.
84
85 ADDRESS_REG_SXTW
86 A base register indexed by (optionally scaled) sign-extended register.
87
88 ADDRESS_LO_SUM
89 A LO_SUM rtx with a base register and "LO12" symbol relocation.
90
91       ADDRESS_SYMBOLIC
92 A constant symbolic address, in pc-relative literal pool. */
93
94 enum aarch64_address_type {
95 ADDRESS_REG_IMM,
96 ADDRESS_REG_WB,
97 ADDRESS_REG_REG,
98 ADDRESS_REG_UXTW,
99 ADDRESS_REG_SXTW,
100 ADDRESS_LO_SUM,
101 ADDRESS_SYMBOLIC
102 };
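/* Illustrative A64 addressing forms for the classifications above (an
   informal sketch, not an exhaustive list of what each class accepts):

     ADDRESS_REG_IMM      ldr  x0, [x1, #16]
     ADDRESS_REG_WB       ldr  x0, [x1, #16]!      (or post-index: [x1], #16)
     ADDRESS_REG_REG      ldr  x0, [x1, x2, lsl #3]
     ADDRESS_REG_UXTW     ldr  x0, [x1, w2, uxtw #2]
     ADDRESS_REG_SXTW     ldr  x0, [x1, w2, sxtw #2]
     ADDRESS_LO_SUM       ldr  x0, [x1, #:lo12:foo]
     ADDRESS_SYMBOLIC     ldr  x0, .Lliteral       (pc-relative literal)  */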
103
104 struct aarch64_address_info {
105 enum aarch64_address_type type;
106 rtx base;
107 rtx offset;
108 int shift;
109 enum aarch64_symbol_type symbol_type;
110 };
111
112 struct simd_immediate_info
113 {
114 rtx value;
115 int shift;
116 int element_width;
117 bool mvn;
118 bool msl;
119 };
120
121 /* The current code model. */
122 enum aarch64_code_model aarch64_cmodel;
123
124 #ifdef HAVE_AS_TLS
125 #undef TARGET_HAVE_TLS
126 #define TARGET_HAVE_TLS 1
127 #endif
128
129 static bool aarch64_lra_p (void);
130 static bool aarch64_composite_type_p (const_tree, enum machine_mode);
131 static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
132 const_tree,
133 enum machine_mode *, int *,
134 bool *);
135 static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
136 static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
137 static void aarch64_override_options_after_change (void);
138 static bool aarch64_vector_mode_supported_p (enum machine_mode);
139 static unsigned bit_count (unsigned HOST_WIDE_INT);
140 static bool aarch64_const_vec_all_same_int_p (rtx,
141 HOST_WIDE_INT, HOST_WIDE_INT);
142
143 static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
144 const unsigned char *sel);
145 static int aarch64_address_cost (rtx, enum machine_mode, addr_space_t, bool);
146
147 /* The processor for which instructions should be scheduled. */
148 enum aarch64_processor aarch64_tune = cortexa53;
149
150 /* The current tuning set. */
151 const struct tune_params *aarch64_tune_params;
152
153 /* Mask to specify which instructions we are allowed to generate. */
154 unsigned long aarch64_isa_flags = 0;
155
156 /* Mask to specify which instruction scheduling options should be used. */
157 unsigned long aarch64_tune_flags = 0;
158
159 /* Tuning parameters. */
160
161 #if HAVE_DESIGNATED_INITIALIZERS
162 #define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
163 #else
164 #define NAMED_PARAM(NAME, VAL) (VAL)
165 #endif
166
167 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
168 __extension__
169 #endif
170
171 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
172 __extension__
173 #endif
174 static const struct cpu_addrcost_table generic_addrcost_table =
175 {
176 #if HAVE_DESIGNATED_INITIALIZERS
177 .addr_scale_costs =
178 #endif
179 {
180 NAMED_PARAM (hi, 0),
181 NAMED_PARAM (si, 0),
182 NAMED_PARAM (di, 0),
183 NAMED_PARAM (ti, 0),
184 },
185 NAMED_PARAM (pre_modify, 0),
186 NAMED_PARAM (post_modify, 0),
187 NAMED_PARAM (register_offset, 0),
188 NAMED_PARAM (register_extend, 0),
189 NAMED_PARAM (imm_offset, 0)
190 };
191
192 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
193 __extension__
194 #endif
195 static const struct cpu_addrcost_table cortexa57_addrcost_table =
196 {
197 #if HAVE_DESIGNATED_INITIALIZERS
198 .addr_scale_costs =
199 #endif
200 {
201 NAMED_PARAM (hi, 1),
202 NAMED_PARAM (si, 0),
203 NAMED_PARAM (di, 0),
204 NAMED_PARAM (ti, 1),
205 },
206 NAMED_PARAM (pre_modify, 0),
207 NAMED_PARAM (post_modify, 0),
208 NAMED_PARAM (register_offset, 0),
209 NAMED_PARAM (register_extend, 0),
210 NAMED_PARAM (imm_offset, 0),
211 };
212
213 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
214 __extension__
215 #endif
216 static const struct cpu_regmove_cost generic_regmove_cost =
217 {
218 NAMED_PARAM (GP2GP, 1),
219 NAMED_PARAM (GP2FP, 2),
220 NAMED_PARAM (FP2GP, 2),
221 /* We currently do not provide direct support for TFmode Q->Q move.
222 Therefore we need to raise the cost above 2 in order to have
223 reload handle the situation. */
224 NAMED_PARAM (FP2FP, 4)
225 };
226
227 /* Generic costs for vector insn classes. */
228 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
229 __extension__
230 #endif
231 static const struct cpu_vector_cost generic_vector_cost =
232 {
233 NAMED_PARAM (scalar_stmt_cost, 1),
234 NAMED_PARAM (scalar_load_cost, 1),
235 NAMED_PARAM (scalar_store_cost, 1),
236 NAMED_PARAM (vec_stmt_cost, 1),
237 NAMED_PARAM (vec_to_scalar_cost, 1),
238 NAMED_PARAM (scalar_to_vec_cost, 1),
239 NAMED_PARAM (vec_align_load_cost, 1),
240 NAMED_PARAM (vec_unalign_load_cost, 1),
241 NAMED_PARAM (vec_unalign_store_cost, 1),
242 NAMED_PARAM (vec_store_cost, 1),
243 NAMED_PARAM (cond_taken_branch_cost, 3),
244 NAMED_PARAM (cond_not_taken_branch_cost, 1)
245 };
246
247 /* Generic costs for vector insn classes. */
248 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
249 __extension__
250 #endif
251 static const struct cpu_vector_cost cortexa57_vector_cost =
252 {
253 NAMED_PARAM (scalar_stmt_cost, 1),
254 NAMED_PARAM (scalar_load_cost, 4),
255 NAMED_PARAM (scalar_store_cost, 1),
256 NAMED_PARAM (vec_stmt_cost, 3),
257 NAMED_PARAM (vec_to_scalar_cost, 8),
258 NAMED_PARAM (scalar_to_vec_cost, 8),
259 NAMED_PARAM (vec_align_load_cost, 5),
260 NAMED_PARAM (vec_unalign_load_cost, 5),
261 NAMED_PARAM (vec_unalign_store_cost, 1),
262 NAMED_PARAM (vec_store_cost, 1),
263 NAMED_PARAM (cond_taken_branch_cost, 1),
264 NAMED_PARAM (cond_not_taken_branch_cost, 1)
265 };
266
267 #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
268 __extension__
269 #endif
270 static const struct tune_params generic_tunings =
271 {
272 &cortexa57_extra_costs,
273 &generic_addrcost_table,
274 &generic_regmove_cost,
275 &generic_vector_cost,
276 NAMED_PARAM (memmov_cost, 4),
277 NAMED_PARAM (issue_rate, 2)
278 };
279
280 static const struct tune_params cortexa53_tunings =
281 {
282 &cortexa53_extra_costs,
283 &generic_addrcost_table,
284 &generic_regmove_cost,
285 &generic_vector_cost,
286 NAMED_PARAM (memmov_cost, 4),
287 NAMED_PARAM (issue_rate, 2)
288 };
289
290 static const struct tune_params cortexa57_tunings =
291 {
292 &cortexa57_extra_costs,
293 &cortexa57_addrcost_table,
294 &generic_regmove_cost,
295 &cortexa57_vector_cost,
296 NAMED_PARAM (memmov_cost, 4),
297 NAMED_PARAM (issue_rate, 3)
298 };
299
300 /* A processor implementing AArch64. */
301 struct processor
302 {
303 const char *const name;
304 enum aarch64_processor core;
305 const char *arch;
306 const unsigned long flags;
307 const struct tune_params *const tune;
308 };
309
310 /* Processor cores implementing AArch64. */
311 static const struct processor all_cores[] =
312 {
313 #define AARCH64_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \
314 {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
315 #include "aarch64-cores.def"
316 #undef AARCH64_CORE
317 {"generic", cortexa53, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
318 {NULL, aarch64_none, NULL, 0, NULL}
319 };
320
321 /* Architectures implementing AArch64. */
322 static const struct processor all_architectures[] =
323 {
324 #define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
325 {NAME, CORE, #ARCH, FLAGS, NULL},
326 #include "aarch64-arches.def"
327 #undef AARCH64_ARCH
328 {NULL, aarch64_none, NULL, 0, NULL}
329 };
330
331 /* Target specification.  These are populated as command-line arguments
332 are processed, or NULL if not specified. */
333 static const struct processor *selected_arch;
334 static const struct processor *selected_cpu;
335 static const struct processor *selected_tune;
336
337 #define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
338
339 /* An ISA extension in the co-processor and main instruction set space. */
340 struct aarch64_option_extension
341 {
342 const char *const name;
343 const unsigned long flags_on;
344 const unsigned long flags_off;
345 };
346
347 /* ISA extensions in AArch64. */
348 static const struct aarch64_option_extension all_extensions[] =
349 {
350 #define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
351 {NAME, FLAGS_ON, FLAGS_OFF},
352 #include "aarch64-option-extensions.def"
353 #undef AARCH64_OPT_EXTENSION
354 {NULL, 0, 0}
355 };
356
357 /* Used to track the size of an address when generating a pre/post
358 increment address. */
359 static enum machine_mode aarch64_memory_reference_mode;
360
361 /* Used to force GTY into this file. */
362 static GTY(()) int gty_dummy;
363
364 /* A table of valid AArch64 "bitmask immediate" values for
365 logical instructions. */
366
367 #define AARCH64_NUM_BITMASKS 5334
368 static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
369
370 typedef enum aarch64_cond_code
371 {
372 AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
373 AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
374 AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
375 }
376 aarch64_cc;
377
378 #define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
379
380 /* The condition codes of the processor, and the inverse function. */
381 static const char * const aarch64_condition_codes[] =
382 {
383 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
384 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
385 };
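/* The XOR-with-1 trick in AARCH64_INVERSE_CONDITION_CODE works because the
   enumeration above places each condition next to its inverse: AARCH64_EQ (0)
   pairs with AARCH64_NE (1), AARCH64_CS (2) with AARCH64_CC (3), and
   AARCH64_GE (10) with AARCH64_LT (11), so for example
   AARCH64_INVERSE_CONDITION_CODE (AARCH64_GE) == AARCH64_LT.  */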
386
387 /* Provide a mapping from gcc register numbers to dwarf register numbers. */
388 unsigned
389 aarch64_dbx_register_number (unsigned regno)
390 {
391 if (GP_REGNUM_P (regno))
392 return AARCH64_DWARF_R0 + regno - R0_REGNUM;
393 else if (regno == SP_REGNUM)
394 return AARCH64_DWARF_SP;
395 else if (FP_REGNUM_P (regno))
396 return AARCH64_DWARF_V0 + regno - V0_REGNUM;
397
398 /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
399 equivalent DWARF register. */
400 return DWARF_FRAME_REGISTERS;
401 }
402
403 /* Return TRUE if MODE is any of the large INT modes. */
404 static bool
405 aarch64_vect_struct_mode_p (enum machine_mode mode)
406 {
407 return mode == OImode || mode == CImode || mode == XImode;
408 }
409
410 /* Return TRUE if MODE is any of the vector modes. */
411 static bool
412 aarch64_vector_mode_p (enum machine_mode mode)
413 {
414 return aarch64_vector_mode_supported_p (mode)
415 || aarch64_vect_struct_mode_p (mode);
416 }
417
418 /* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */
419 static bool
420 aarch64_array_mode_supported_p (enum machine_mode mode,
421 unsigned HOST_WIDE_INT nelems)
422 {
423 if (TARGET_SIMD
424 && AARCH64_VALID_SIMD_QREG_MODE (mode)
425 && (nelems >= 2 && nelems <= 4))
426 return true;
427
428 return false;
429 }
430
431 /* Implement HARD_REGNO_NREGS. */
432
433 int
434 aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
435 {
436 switch (aarch64_regno_regclass (regno))
437 {
438 case FP_REGS:
439 case FP_LO_REGS:
440 return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
441 default:
442 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
443 }
444 gcc_unreachable ();
445 }
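/* Worked example (assuming the usual AArch64 values UNITS_PER_WORD == 8 and
   UNITS_PER_VREG == 16): a TImode value (16 bytes) needs
   (16 + 7) / 8 == 2 general registers but only (16 + 15) / 16 == 1 FP/SIMD
   register, while an OImode value (32 bytes) needs 2 FP/SIMD registers.  */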
446
447 /* Implement HARD_REGNO_MODE_OK. */
448
449 int
450 aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
451 {
452 if (GET_MODE_CLASS (mode) == MODE_CC)
453 return regno == CC_REGNUM;
454
455 if (regno == SP_REGNUM)
456 /* The purpose of comparing with ptr_mode is to support the
457 global register variable associated with the stack pointer
458 register via the syntax of asm ("wsp") in ILP32. */
459 return mode == Pmode || mode == ptr_mode;
460
461 if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)
462 return mode == Pmode;
463
464 if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
465 return 1;
466
467 if (FP_REGNUM_P (regno))
468 {
469 if (aarch64_vect_struct_mode_p (mode))
470 return
471 (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
472 else
473 return 1;
474 }
475
476 return 0;
477 }
478
479 /* Implement HARD_REGNO_CALLER_SAVE_MODE. */
480 enum machine_mode
481 aarch64_hard_regno_caller_save_mode (unsigned regno, unsigned nregs,
482 enum machine_mode mode)
483 {
484 /* Handle modes that fit within single registers. */
485 if (nregs == 1 && GET_MODE_SIZE (mode) <= 16)
486 {
487 if (GET_MODE_SIZE (mode) >= 4)
488 return mode;
489 else
490 return SImode;
491 }
492 /* Fall back to generic for multi-reg and very large modes. */
493 else
494 return choose_hard_reg_mode (regno, nregs, false);
495 }
496
497 /* Return true if calls to DECL should be treated as
498    long-calls (i.e. called via a register).  */
499 static bool
500 aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
501 {
502 return false;
503 }
504
505 /* Return true if calls to symbol-ref SYM should be treated as
506    long-calls (i.e. called via a register).  */
507 bool
508 aarch64_is_long_call_p (rtx sym)
509 {
510 return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
511 }
512
513 /* Return true if the offsets to a zero/sign-extract operation
514 represent an expression that matches an extend operation. The
515 operands represent the paramters from
516
517 (extract:MODE (mult (reg) (MULT_IMM)) (EXTRACT_IMM) (const_int 0)). */
518 bool
519 aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
520 rtx extract_imm)
521 {
522 HOST_WIDE_INT mult_val, extract_val;
523
524 if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
525 return false;
526
527 mult_val = INTVAL (mult_imm);
528 extract_val = INTVAL (extract_imm);
529
530 if (extract_val > 8
531 && extract_val < GET_MODE_BITSIZE (mode)
532 && exact_log2 (extract_val & ~7) > 0
533 && (extract_val & 7) <= 4
534 && mult_val == (1 << (extract_val & 7)))
535 return true;
536
537 return false;
538 }
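/* Worked example: in DImode, EXTRACT_IMM == 34 and MULT_IMM == 4 pass every
   test above (34 > 8, 34 < 64, exact_log2 (32) > 0, (34 & 7) == 2 and
   4 == 1 << 2), so the function returns true.  That extract keeps the low
   34 bits of (reg * 4), i.e. the low 32 bits of reg shifted left by 2, the
   sort of pattern produced for an extended-register operand such as
   "uxtw #2".  */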
539
540 /* Emit an insn that's a simple single-set. Both the operands must be
541 known to be valid. */
542 inline static rtx
543 emit_set_insn (rtx x, rtx y)
544 {
545 return emit_insn (gen_rtx_SET (VOIDmode, x, y));
546 }
547
548 /* X and Y are two things to compare using CODE. Emit the compare insn and
549 return the rtx for register 0 in the proper mode. */
550 rtx
551 aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
552 {
553 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
554 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
555
556 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
557 return cc_reg;
558 }
559
560 /* Build the SYMBOL_REF for __tls_get_addr. */
561
562 static GTY(()) rtx tls_get_addr_libfunc;
563
564 rtx
565 aarch64_tls_get_addr (void)
566 {
567 if (!tls_get_addr_libfunc)
568 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
569 return tls_get_addr_libfunc;
570 }
571
572 /* Return the TLS model to use for ADDR. */
573
574 static enum tls_model
575 tls_symbolic_operand_type (rtx addr)
576 {
577 enum tls_model tls_kind = TLS_MODEL_NONE;
578 rtx sym, addend;
579
580 if (GET_CODE (addr) == CONST)
581 {
582 split_const (addr, &sym, &addend);
583 if (GET_CODE (sym) == SYMBOL_REF)
584 tls_kind = SYMBOL_REF_TLS_MODEL (sym);
585 }
586 else if (GET_CODE (addr) == SYMBOL_REF)
587 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
588
589 return tls_kind;
590 }
591
592 /* We'll allow lo_sum's in our legitimate addresses
593    so that combine can take care of combining addresses where
594    necessary, but for generation purposes, we'll generate the address
595    as:
596 RTL Absolute
597 tmp = hi (symbol_ref); adrp x1, foo
598 dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo
599 nop
600
601 PIC TLS
602 adrp x1, :got:foo adrp tmp, :tlsgd:foo
603 ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo
604 bl __tls_get_addr
605 nop
606
607 Load TLS symbol, depending on TLS mechanism and TLS access model.
608
609 Global Dynamic - Traditional TLS:
610 adrp tmp, :tlsgd:imm
611 add dest, tmp, #:tlsgd_lo12:imm
612 bl __tls_get_addr
613
614 Global Dynamic - TLS Descriptors:
615 adrp dest, :tlsdesc:imm
616 ldr tmp, [dest, #:tlsdesc_lo12:imm]
617 add dest, dest, #:tlsdesc_lo12:imm
618 blr tmp
619 mrs tp, tpidr_el0
620 add dest, dest, tp
621
622 Initial Exec:
623 mrs tp, tpidr_el0
624 adrp tmp, :gottprel:imm
625 ldr dest, [tmp, #:gottprel_lo12:imm]
626 add dest, dest, tp
627
628 Local Exec:
629 mrs tp, tpidr_el0
630 add t0, tp, #:tprel_hi12:imm
631 add t0, #:tprel_lo12_nc:imm
632 */
633
634 static void
635 aarch64_load_symref_appropriately (rtx dest, rtx imm,
636 enum aarch64_symbol_type type)
637 {
638 switch (type)
639 {
640 case SYMBOL_SMALL_ABSOLUTE:
641 {
642 /* In ILP32, the mode of dest can be either SImode or DImode. */
643 rtx tmp_reg = dest;
644 enum machine_mode mode = GET_MODE (dest);
645
646 gcc_assert (mode == Pmode || mode == ptr_mode);
647
648 if (can_create_pseudo_p ())
649 tmp_reg = gen_reg_rtx (mode);
650
651 emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
652 emit_insn (gen_add_losym (dest, tmp_reg, imm));
653 return;
654 }
655
656 case SYMBOL_TINY_ABSOLUTE:
657 emit_insn (gen_rtx_SET (Pmode, dest, imm));
658 return;
659
660 case SYMBOL_SMALL_GOT:
661 {
662 /* In ILP32, the mode of dest can be either SImode or DImode,
663 while the got entry is always of SImode size. The mode of
664 dest depends on how dest is used: if dest is assigned to a
665 pointer (e.g. in the memory), it has SImode; it may have
666        DImode if dest is dereferenced to access the memory.
667 This is why we have to handle three different ldr_got_small
668 patterns here (two patterns for ILP32). */
669 rtx tmp_reg = dest;
670 enum machine_mode mode = GET_MODE (dest);
671
672 if (can_create_pseudo_p ())
673 tmp_reg = gen_reg_rtx (mode);
674
675 emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
676 if (mode == ptr_mode)
677 {
678 if (mode == DImode)
679 emit_insn (gen_ldr_got_small_di (dest, tmp_reg, imm));
680 else
681 emit_insn (gen_ldr_got_small_si (dest, tmp_reg, imm));
682 }
683 else
684 {
685 gcc_assert (mode == Pmode);
686 emit_insn (gen_ldr_got_small_sidi (dest, tmp_reg, imm));
687 }
688
689 return;
690 }
691
692 case SYMBOL_SMALL_TLSGD:
693 {
694 rtx_insn *insns;
695 rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
696
697 start_sequence ();
698 aarch64_emit_call_insn (gen_tlsgd_small (result, imm));
699 insns = get_insns ();
700 end_sequence ();
701
702 RTL_CONST_CALL_P (insns) = 1;
703 emit_libcall_block (insns, dest, result, imm);
704 return;
705 }
706
707 case SYMBOL_SMALL_TLSDESC:
708 {
709 enum machine_mode mode = GET_MODE (dest);
710 rtx x0 = gen_rtx_REG (mode, R0_REGNUM);
711 rtx tp;
712
713 gcc_assert (mode == Pmode || mode == ptr_mode);
714
715 /* In ILP32, the got entry is always of SImode size. Unlike
716 small GOT, the dest is fixed at reg 0. */
717 if (TARGET_ILP32)
718 emit_insn (gen_tlsdesc_small_si (imm));
719 else
720 emit_insn (gen_tlsdesc_small_di (imm));
721 tp = aarch64_load_tp (NULL);
722
723 if (mode != Pmode)
724 tp = gen_lowpart (mode, tp);
725
726 emit_insn (gen_rtx_SET (mode, dest, gen_rtx_PLUS (mode, tp, x0)));
727 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
728 return;
729 }
730
731 case SYMBOL_SMALL_GOTTPREL:
732 {
733 /* In ILP32, the mode of dest can be either SImode or DImode,
734 while the got entry is always of SImode size. The mode of
735 dest depends on how dest is used: if dest is assigned to a
736 pointer (e.g. in the memory), it has SImode; it may have
737        DImode if dest is dereferenced to access the memory.
738 This is why we have to handle three different tlsie_small
739 patterns here (two patterns for ILP32). */
740 enum machine_mode mode = GET_MODE (dest);
741 rtx tmp_reg = gen_reg_rtx (mode);
742 rtx tp = aarch64_load_tp (NULL);
743
744 if (mode == ptr_mode)
745 {
746 if (mode == DImode)
747 emit_insn (gen_tlsie_small_di (tmp_reg, imm));
748 else
749 {
750 emit_insn (gen_tlsie_small_si (tmp_reg, imm));
751 tp = gen_lowpart (mode, tp);
752 }
753 }
754 else
755 {
756 gcc_assert (mode == Pmode);
757 emit_insn (gen_tlsie_small_sidi (tmp_reg, imm));
758 }
759
760 emit_insn (gen_rtx_SET (mode, dest, gen_rtx_PLUS (mode, tp, tmp_reg)));
761 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
762 return;
763 }
764
765 case SYMBOL_SMALL_TPREL:
766 {
767 rtx tp = aarch64_load_tp (NULL);
768 emit_insn (gen_tlsle_small (dest, tp, imm));
769 set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
770 return;
771 }
772
773 case SYMBOL_TINY_GOT:
774 emit_insn (gen_ldr_got_tiny (dest, imm));
775 return;
776
777 default:
778 gcc_unreachable ();
779 }
780 }
781
782 /* Emit a move from SRC to DEST. Assume that the move expanders can
783 handle all moves if !can_create_pseudo_p (). The distinction is
784 important because, unlike emit_move_insn, the move expanders know
785 how to force Pmode objects into the constant pool even when the
786 constant pool address is not itself legitimate. */
787 static rtx
788 aarch64_emit_move (rtx dest, rtx src)
789 {
790 return (can_create_pseudo_p ()
791 ? emit_move_insn (dest, src)
792 : emit_move_insn_1 (dest, src));
793 }
794
795 /* Split a 128-bit move operation into two 64-bit move operations,
796 taking care to handle partial overlap of register to register
797 copies. Special cases are needed when moving between GP regs and
798 FP regs. SRC can be a register, constant or memory; DST a register
799 or memory. If either operand is memory it must not have any side
800 effects. */
801 void
802 aarch64_split_128bit_move (rtx dst, rtx src)
803 {
804 rtx dst_lo, dst_hi;
805 rtx src_lo, src_hi;
806
807 enum machine_mode mode = GET_MODE (dst);
808
809 gcc_assert (mode == TImode || mode == TFmode);
810 gcc_assert (!(side_effects_p (src) || side_effects_p (dst)));
811 gcc_assert (mode == GET_MODE (src) || GET_MODE (src) == VOIDmode);
812
813 if (REG_P (dst) && REG_P (src))
814 {
815 int src_regno = REGNO (src);
816 int dst_regno = REGNO (dst);
817
818 /* Handle FP <-> GP regs. */
819 if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
820 {
821 src_lo = gen_lowpart (word_mode, src);
822 src_hi = gen_highpart (word_mode, src);
823
824 if (mode == TImode)
825 {
826 emit_insn (gen_aarch64_movtilow_di (dst, src_lo));
827 emit_insn (gen_aarch64_movtihigh_di (dst, src_hi));
828 }
829 else
830 {
831 emit_insn (gen_aarch64_movtflow_di (dst, src_lo));
832 emit_insn (gen_aarch64_movtfhigh_di (dst, src_hi));
833 }
834 return;
835 }
836 else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
837 {
838 dst_lo = gen_lowpart (word_mode, dst);
839 dst_hi = gen_highpart (word_mode, dst);
840
841 if (mode == TImode)
842 {
843 emit_insn (gen_aarch64_movdi_tilow (dst_lo, src));
844 emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src));
845 }
846 else
847 {
848 emit_insn (gen_aarch64_movdi_tflow (dst_lo, src));
849 emit_insn (gen_aarch64_movdi_tfhigh (dst_hi, src));
850 }
851 return;
852 }
853 }
854
855 dst_lo = gen_lowpart (word_mode, dst);
856 dst_hi = gen_highpart (word_mode, dst);
857 src_lo = gen_lowpart (word_mode, src);
858 src_hi = gen_highpart_mode (word_mode, mode, src);
859
860 /* At most one pairing may overlap. */
861 if (reg_overlap_mentioned_p (dst_lo, src_hi))
862 {
863 aarch64_emit_move (dst_hi, src_hi);
864 aarch64_emit_move (dst_lo, src_lo);
865 }
866 else
867 {
868 aarch64_emit_move (dst_lo, src_lo);
869 aarch64_emit_move (dst_hi, src_hi);
870 }
871 }
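/* Example of the overlap handling above (hypothetical register choices,
   assuming a TImode value occupies two consecutive GP registers): copying a
   TImode value from {x1, x2} to {x2, x3} makes dst_lo (x2) overlap src_hi
   (x2), so the high halves are moved first (x3 = x2, then x2 = x1); with no
   such overlap the low halves are moved first.  */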
872
873 bool
874 aarch64_split_128bit_move_p (rtx dst, rtx src)
875 {
876 return (! REG_P (src)
877 || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
878 }
879
880 /* Split a complex SIMD combine. */
881
882 void
883 aarch64_split_simd_combine (rtx dst, rtx src1, rtx src2)
884 {
885 enum machine_mode src_mode = GET_MODE (src1);
886 enum machine_mode dst_mode = GET_MODE (dst);
887
888 gcc_assert (VECTOR_MODE_P (dst_mode));
889
890 if (REG_P (dst) && REG_P (src1) && REG_P (src2))
891 {
892 rtx (*gen) (rtx, rtx, rtx);
893
894 switch (src_mode)
895 {
896 case V8QImode:
897 gen = gen_aarch64_simd_combinev8qi;
898 break;
899 case V4HImode:
900 gen = gen_aarch64_simd_combinev4hi;
901 break;
902 case V2SImode:
903 gen = gen_aarch64_simd_combinev2si;
904 break;
905 case V2SFmode:
906 gen = gen_aarch64_simd_combinev2sf;
907 break;
908 case DImode:
909 gen = gen_aarch64_simd_combinedi;
910 break;
911 case DFmode:
912 gen = gen_aarch64_simd_combinedf;
913 break;
914 default:
915 gcc_unreachable ();
916 }
917
918 emit_insn (gen (dst, src1, src2));
919 return;
920 }
921 }
922
923 /* Split a complex SIMD move. */
924
925 void
926 aarch64_split_simd_move (rtx dst, rtx src)
927 {
928 enum machine_mode src_mode = GET_MODE (src);
929 enum machine_mode dst_mode = GET_MODE (dst);
930
931 gcc_assert (VECTOR_MODE_P (dst_mode));
932
933 if (REG_P (dst) && REG_P (src))
934 {
935 rtx (*gen) (rtx, rtx);
936
937 gcc_assert (VECTOR_MODE_P (src_mode));
938
939 switch (src_mode)
940 {
941 case V16QImode:
942 gen = gen_aarch64_split_simd_movv16qi;
943 break;
944 case V8HImode:
945 gen = gen_aarch64_split_simd_movv8hi;
946 break;
947 case V4SImode:
948 gen = gen_aarch64_split_simd_movv4si;
949 break;
950 case V2DImode:
951 gen = gen_aarch64_split_simd_movv2di;
952 break;
953 case V4SFmode:
954 gen = gen_aarch64_split_simd_movv4sf;
955 break;
956 case V2DFmode:
957 gen = gen_aarch64_split_simd_movv2df;
958 break;
959 default:
960 gcc_unreachable ();
961 }
962
963 emit_insn (gen (dst, src));
964 return;
965 }
966 }
967
968 static rtx
969 aarch64_force_temporary (enum machine_mode mode, rtx x, rtx value)
970 {
971 if (can_create_pseudo_p ())
972 return force_reg (mode, value);
973 else
974 {
975 x = aarch64_emit_move (x, value);
976 return x;
977 }
978 }
979
980
981 static rtx
982 aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
983 {
984 if (!aarch64_plus_immediate (GEN_INT (offset), mode))
985 {
986 rtx high;
987 /* Load the full offset into a register. This
988 might be improvable in the future. */
989 high = GEN_INT (offset);
990 offset = 0;
991 high = aarch64_force_temporary (mode, temp, high);
992 reg = aarch64_force_temporary (mode, temp,
993 gen_rtx_PLUS (mode, high, reg));
994 }
995 return plus_constant (mode, reg, offset);
996 }
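/* Sketch of the two paths above (assuming aarch64_plus_immediate accepts the
   usual 12-bit, optionally LSL #12, add/sub immediates): an offset of 4 or
   0x1000 is simply folded into the returned PLUS, whereas an offset such as
   0x123456 is first loaded into TEMP and added to REG, and the function then
   returns the resulting register with a zero offset.  */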
997
998 void
999 aarch64_expand_mov_immediate (rtx dest, rtx imm)
1000 {
1001 enum machine_mode mode = GET_MODE (dest);
1002 unsigned HOST_WIDE_INT mask;
1003 int i;
1004 bool first;
1005 unsigned HOST_WIDE_INT val;
1006 bool subtargets;
1007 rtx subtarget;
1008 int one_match, zero_match, first_not_ffff_match;
1009
1010 gcc_assert (mode == SImode || mode == DImode);
1011
1012 /* Check on what type of symbol it is. */
1013 if (GET_CODE (imm) == SYMBOL_REF
1014 || GET_CODE (imm) == LABEL_REF
1015 || GET_CODE (imm) == CONST)
1016 {
1017 rtx mem, base, offset;
1018 enum aarch64_symbol_type sty;
1019
1020 /* If we have (const (plus symbol offset)), separate out the offset
1021 before we start classifying the symbol. */
1022 split_const (imm, &base, &offset);
1023
1024 sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
1025 switch (sty)
1026 {
1027 case SYMBOL_FORCE_TO_MEM:
1028 if (offset != const0_rtx
1029 && targetm.cannot_force_const_mem (mode, imm))
1030 {
1031 gcc_assert (can_create_pseudo_p ());
1032 base = aarch64_force_temporary (mode, dest, base);
1033 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
1034 aarch64_emit_move (dest, base);
1035 return;
1036 }
1037 mem = force_const_mem (ptr_mode, imm);
1038 gcc_assert (mem);
1039 if (mode != ptr_mode)
1040 mem = gen_rtx_ZERO_EXTEND (mode, mem);
1041 emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
1042 return;
1043
1044 case SYMBOL_SMALL_TLSGD:
1045 case SYMBOL_SMALL_TLSDESC:
1046 case SYMBOL_SMALL_GOTTPREL:
1047 case SYMBOL_SMALL_GOT:
1048 case SYMBOL_TINY_GOT:
1049 if (offset != const0_rtx)
1050 {
1051 gcc_assert(can_create_pseudo_p ());
1052 base = aarch64_force_temporary (mode, dest, base);
1053 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
1054 aarch64_emit_move (dest, base);
1055 return;
1056 }
1057 /* FALLTHRU */
1058
1059 case SYMBOL_SMALL_TPREL:
1060 case SYMBOL_SMALL_ABSOLUTE:
1061 case SYMBOL_TINY_ABSOLUTE:
1062 aarch64_load_symref_appropriately (dest, imm, sty);
1063 return;
1064
1065 default:
1066 gcc_unreachable ();
1067 }
1068 }
1069
1070 if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
1071 {
1072 emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
1073 return;
1074 }
1075
1076 if (!CONST_INT_P (imm))
1077 {
1078 if (GET_CODE (imm) == HIGH)
1079 emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
1080 else
1081 {
1082 rtx mem = force_const_mem (mode, imm);
1083 gcc_assert (mem);
1084 emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
1085 }
1086
1087 return;
1088 }
1089
1090 if (mode == SImode)
1091 {
1092 /* We know we can't do this in 1 insn, and we must be able to do it
1093 in two; so don't mess around looking for sequences that don't buy
1094 us anything. */
1095 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
1096 emit_insn (gen_insv_immsi (dest, GEN_INT (16),
1097 GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
1098 return;
1099 }
1100
1101 /* Remaining cases are all for DImode. */
1102
1103 val = INTVAL (imm);
1104 subtargets = optimize && can_create_pseudo_p ();
1105
1106 one_match = 0;
1107 zero_match = 0;
1108 mask = 0xffff;
1109 first_not_ffff_match = -1;
1110
1111 for (i = 0; i < 64; i += 16, mask <<= 16)
1112 {
1113 if ((val & mask) == mask)
1114 one_match++;
1115 else
1116 {
1117 if (first_not_ffff_match < 0)
1118 first_not_ffff_match = i;
1119 if ((val & mask) == 0)
1120 zero_match++;
1121 }
1122 }
1123
1124 if (one_match == 2)
1125 {
1126 /* Set one of the quarters and then insert back into result. */
1127 mask = 0xffffll << first_not_ffff_match;
1128 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
1129 emit_insn (gen_insv_immdi (dest, GEN_INT (first_not_ffff_match),
1130 GEN_INT ((val >> first_not_ffff_match)
1131 & 0xffff)));
1132 return;
1133 }
1134
1135 if (zero_match == 2)
1136 goto simple_sequence;
1137
1138 mask = 0x0ffff0000UL;
1139 for (i = 16; i < 64; i += 16, mask <<= 16)
1140 {
1141 HOST_WIDE_INT comp = mask & ~(mask - 1);
1142
1143 if (aarch64_uimm12_shift (val - (val & mask)))
1144 {
1145 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1146
1147 emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
1148 emit_insn (gen_adddi3 (dest, subtarget,
1149 GEN_INT (val - (val & mask))));
1150 return;
1151 }
1152 else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
1153 {
1154 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1155
1156 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1157 GEN_INT ((val + comp) & mask)));
1158 emit_insn (gen_adddi3 (dest, subtarget,
1159 GEN_INT (val - ((val + comp) & mask))));
1160 return;
1161 }
1162 else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
1163 {
1164 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1165
1166 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1167 GEN_INT ((val - comp) | ~mask)));
1168 emit_insn (gen_adddi3 (dest, subtarget,
1169 GEN_INT (val - ((val - comp) | ~mask))));
1170 return;
1171 }
1172 else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
1173 {
1174 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1175
1176 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1177 GEN_INT (val | ~mask)));
1178 emit_insn (gen_adddi3 (dest, subtarget,
1179 GEN_INT (val - (val | ~mask))));
1180 return;
1181 }
1182 }
1183
1184 /* See if we can do it by arithmetically combining two
1185 immediates. */
1186 for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
1187 {
1188 int j;
1189 mask = 0xffff;
1190
1191 if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
1192 || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
1193 {
1194 subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
1195 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1196 GEN_INT (aarch64_bitmasks[i])));
1197 emit_insn (gen_adddi3 (dest, subtarget,
1198 GEN_INT (val - aarch64_bitmasks[i])));
1199 return;
1200 }
1201
1202 for (j = 0; j < 64; j += 16, mask <<= 16)
1203 {
1204 if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
1205 {
1206 emit_insn (gen_rtx_SET (VOIDmode, dest,
1207 GEN_INT (aarch64_bitmasks[i])));
1208 emit_insn (gen_insv_immdi (dest, GEN_INT (j),
1209 GEN_INT ((val >> j) & 0xffff)));
1210 return;
1211 }
1212 }
1213 }
1214
1215 /* See if we can do it by logically combining two immediates. */
1216 for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
1217 {
1218 if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
1219 {
1220 int j;
1221
1222 for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
1223 if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
1224 {
1225 subtarget = subtargets ? gen_reg_rtx (mode) : dest;
1226 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1227 GEN_INT (aarch64_bitmasks[i])));
1228 emit_insn (gen_iordi3 (dest, subtarget,
1229 GEN_INT (aarch64_bitmasks[j])));
1230 return;
1231 }
1232 }
1233 else if ((val & aarch64_bitmasks[i]) == val)
1234 {
1235 int j;
1236
1237 for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
1238 if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
1239 {
1240
1241 subtarget = subtargets ? gen_reg_rtx (mode) : dest;
1242 emit_insn (gen_rtx_SET (VOIDmode, subtarget,
1243 GEN_INT (aarch64_bitmasks[j])));
1244 emit_insn (gen_anddi3 (dest, subtarget,
1245 GEN_INT (aarch64_bitmasks[i])));
1246 return;
1247 }
1248 }
1249 }
1250
1251 if (one_match > zero_match)
1252 {
1253 /* Set either first three quarters or all but the third. */
1254 mask = 0xffffll << (16 - first_not_ffff_match);
1255 emit_insn (gen_rtx_SET (VOIDmode, dest,
1256 GEN_INT (val | mask | 0xffffffff00000000ull)));
1257
1258 /* Now insert other two quarters. */
1259 for (i = first_not_ffff_match + 16, mask <<= (first_not_ffff_match << 1);
1260 i < 64; i += 16, mask <<= 16)
1261 {
1262 if ((val & mask) != mask)
1263 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
1264 GEN_INT ((val >> i) & 0xffff)));
1265 }
1266 return;
1267 }
1268
1269 simple_sequence:
1270 first = true;
1271 mask = 0xffff;
1272 for (i = 0; i < 64; i += 16, mask <<= 16)
1273 {
1274 if ((val & mask) != 0)
1275 {
1276 if (first)
1277 {
1278 emit_insn (gen_rtx_SET (VOIDmode, dest,
1279 GEN_INT (val & mask)));
1280 first = false;
1281 }
1282 else
1283 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
1284 GEN_INT ((val >> i) & 0xffff)));
1285 }
1286 }
1287 }
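/* Sketch of one DImode expansion (illustrative value and register): for
   imm == 0x0000123400005678 two of the four 16-bit quarters are zero, so the
   simple_sequence path emits roughly

     mov  x0, #0x5678
     movk x0, #0x1234, lsl #32

   i.e. one SET of the first non-zero quarter followed by an insv_immdi for
   each remaining non-zero quarter.  */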
1288
1289 static bool
1290 aarch64_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
1291 tree exp ATTRIBUTE_UNUSED)
1292 {
1293 /* Currently, always true. */
1294 return true;
1295 }
1296
1297 /* Implement TARGET_PASS_BY_REFERENCE. */
1298
1299 static bool
1300 aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
1301 enum machine_mode mode,
1302 const_tree type,
1303 bool named ATTRIBUTE_UNUSED)
1304 {
1305 HOST_WIDE_INT size;
1306 enum machine_mode dummymode;
1307 int nregs;
1308
1309 /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
1310 size = (mode == BLKmode && type)
1311 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
1312
1313 /* Aggregates are passed by reference based on their size. */
1314 if (type && AGGREGATE_TYPE_P (type))
1315 {
1316 size = int_size_in_bytes (type);
1317 }
1318
1319 /* Variable sized arguments are always returned by reference. */
1320 if (size < 0)
1321 return true;
1322
1323 /* Can this be a candidate to be passed in fp/simd register(s)? */
1324 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
1325 &dummymode, &nregs,
1326 NULL))
1327 return false;
1328
1329 /* Arguments which are variable sized or larger than 2 registers are
1330      passed by reference unless they are a homogeneous floating-point
1331 aggregate. */
1332 return size > 2 * UNITS_PER_WORD;
1333 }
1334
1335 /* Return TRUE if VALTYPE is padded to its least significant bits. */
1336 static bool
1337 aarch64_return_in_msb (const_tree valtype)
1338 {
1339 enum machine_mode dummy_mode;
1340 int dummy_int;
1341
1342 /* Never happens in little-endian mode. */
1343 if (!BYTES_BIG_ENDIAN)
1344 return false;
1345
1346 /* Only composite types smaller than or equal to 16 bytes can
1347 be potentially returned in registers. */
1348 if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
1349 || int_size_in_bytes (valtype) <= 0
1350 || int_size_in_bytes (valtype) > 16)
1351 return false;
1352
1353 /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
1354 or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
1355 is always passed/returned in the least significant bits of fp/simd
1356 register(s). */
1357 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
1358 &dummy_mode, &dummy_int, NULL))
1359 return false;
1360
1361 return true;
1362 }
1363
1364 /* Implement TARGET_FUNCTION_VALUE.
1365 Define how to find the value returned by a function. */
1366
1367 static rtx
1368 aarch64_function_value (const_tree type, const_tree func,
1369 bool outgoing ATTRIBUTE_UNUSED)
1370 {
1371 enum machine_mode mode;
1372 int unsignedp;
1373 int count;
1374 enum machine_mode ag_mode;
1375
1376 mode = TYPE_MODE (type);
1377 if (INTEGRAL_TYPE_P (type))
1378 mode = promote_function_mode (type, mode, &unsignedp, func, 1);
1379
1380 if (aarch64_return_in_msb (type))
1381 {
1382 HOST_WIDE_INT size = int_size_in_bytes (type);
1383
1384 if (size % UNITS_PER_WORD != 0)
1385 {
1386 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
1387 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
1388 }
1389 }
1390
1391 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
1392 &ag_mode, &count, NULL))
1393 {
1394 if (!aarch64_composite_type_p (type, mode))
1395 {
1396 gcc_assert (count == 1 && mode == ag_mode);
1397 return gen_rtx_REG (mode, V0_REGNUM);
1398 }
1399 else
1400 {
1401 int i;
1402 rtx par;
1403
1404 par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
1405 for (i = 0; i < count; i++)
1406 {
1407 rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
1408 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
1409 GEN_INT (i * GET_MODE_SIZE (ag_mode)));
1410 XVECEXP (par, 0, i) = tmp;
1411 }
1412 return par;
1413 }
1414 }
1415 else
1416 return gen_rtx_REG (mode, R0_REGNUM);
1417 }
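/* Example of the composite path above: returning
   struct { float a, b, c, d; } (an HFA of four floats) gives count == 4 and
   ag_mode == SFmode, so the function builds a PARALLEL of
   (reg:SF V0_REGNUM) ... (reg:SF V0_REGNUM + 3) with byte offsets 0, 4, 8
   and 12, matching the AAPCS64 rule that such a value comes back in
   s0-s3.  */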
1418
1419 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.
1420 Return true if REGNO is the number of a hard register in which the values
1421 of called function may come back. */
1422
1423 static bool
1424 aarch64_function_value_regno_p (const unsigned int regno)
1425 {
1426 /* Maximum of 16 bytes can be returned in the general registers. Examples
1427 of 16-byte return values are: 128-bit integers and 16-byte small
1428 structures (excluding homogeneous floating-point aggregates). */
1429 if (regno == R0_REGNUM || regno == R1_REGNUM)
1430 return true;
1431
1432 /* Up to four fp/simd registers can return a function value, e.g. a
1433 homogeneous floating-point aggregate having four members. */
1434 if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
1435 return !TARGET_GENERAL_REGS_ONLY;
1436
1437 return false;
1438 }
1439
1440 /* Implement TARGET_RETURN_IN_MEMORY.
1441
1442 If the type T of the result of a function is such that
1443 void func (T arg)
1444 would require that arg be passed as a value in a register (or set of
1445 registers) according to the parameter passing rules, then the result
1446 is returned in the same registers as would be used for such an
1447 argument. */
1448
1449 static bool
1450 aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
1451 {
1452 HOST_WIDE_INT size;
1453 enum machine_mode ag_mode;
1454 int count;
1455
1456 if (!AGGREGATE_TYPE_P (type)
1457 && TREE_CODE (type) != COMPLEX_TYPE
1458 && TREE_CODE (type) != VECTOR_TYPE)
1459     /* Simple scalar types are always returned in registers.  */
1460 return false;
1461
1462 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
1463 type,
1464 &ag_mode,
1465 &count,
1466 NULL))
1467 return false;
1468
1469   /* Types larger than 2 registers are returned in memory.  */
1470 size = int_size_in_bytes (type);
1471 return (size < 0 || size > 2 * UNITS_PER_WORD);
1472 }
1473
1474 static bool
1475 aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
1476 const_tree type, int *nregs)
1477 {
1478 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1479 return aarch64_vfp_is_call_or_return_candidate (mode,
1480 type,
1481 &pcum->aapcs_vfp_rmode,
1482 nregs,
1483 NULL);
1484 }
1485
1486 /* Given MODE and TYPE of a function argument, return the alignment in
1487 bits. The idea is to suppress any stronger alignment requested by
1488 the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
1489 This is a helper function for local use only. */
1490
1491 static unsigned int
1492 aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
1493 {
1494 unsigned int alignment;
1495
1496 if (type)
1497 {
1498 if (!integer_zerop (TYPE_SIZE (type)))
1499 {
1500 if (TYPE_MODE (type) == mode)
1501 alignment = TYPE_ALIGN (type);
1502 else
1503 alignment = GET_MODE_ALIGNMENT (mode);
1504 }
1505 else
1506 alignment = 0;
1507 }
1508 else
1509 alignment = GET_MODE_ALIGNMENT (mode);
1510
1511 return alignment;
1512 }
1513
1514 /* Lay out a function argument according to the AAPCS64 rules.  The rule
1515 numbers refer to the rule numbers in the AAPCS64. */
1516
1517 static void
1518 aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
1519 const_tree type,
1520 bool named ATTRIBUTE_UNUSED)
1521 {
1522 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1523 int ncrn, nvrn, nregs;
1524 bool allocate_ncrn, allocate_nvrn;
1525 HOST_WIDE_INT size;
1526
1527 /* We need to do this once per argument. */
1528 if (pcum->aapcs_arg_processed)
1529 return;
1530
1531 pcum->aapcs_arg_processed = true;
1532
1533   /* Size in bytes, rounded up to the nearest multiple of 8 bytes.  */
1534 size
1535 = AARCH64_ROUND_UP (type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode),
1536 UNITS_PER_WORD);
1537
1538 allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
1539 allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
1540 mode,
1541 type,
1542 &nregs);
1543
1544   /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
1545 The following code thus handles passing by SIMD/FP registers first. */
1546
1547 nvrn = pcum->aapcs_nvrn;
1548
1549   /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
1550      and homogeneous short-vector aggregates (HVA).  */
1551 if (allocate_nvrn)
1552 {
1553 if (nvrn + nregs <= NUM_FP_ARG_REGS)
1554 {
1555 pcum->aapcs_nextnvrn = nvrn + nregs;
1556 if (!aarch64_composite_type_p (type, mode))
1557 {
1558 gcc_assert (nregs == 1);
1559 pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
1560 }
1561 else
1562 {
1563 rtx par;
1564 int i;
1565 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
1566 for (i = 0; i < nregs; i++)
1567 {
1568 rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
1569 V0_REGNUM + nvrn + i);
1570 tmp = gen_rtx_EXPR_LIST
1571 (VOIDmode, tmp,
1572 GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
1573 XVECEXP (par, 0, i) = tmp;
1574 }
1575 pcum->aapcs_reg = par;
1576 }
1577 return;
1578 }
1579 else
1580 {
1581 /* C.3 NSRN is set to 8. */
1582 pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
1583 goto on_stack;
1584 }
1585 }
1586
1587 ncrn = pcum->aapcs_ncrn;
1588 nregs = size / UNITS_PER_WORD;
1589
1590   /* C6 - C9, though the sign and zero extension semantics are
1591      handled elsewhere.  This is the case where the argument fits
1592      entirely in general registers.  */
1593 if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
1594 {
1595 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
1596
1597 gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
1598
1599 /* C.8 if the argument has an alignment of 16 then the NGRN is
1600 rounded up to the next even number. */
1601 if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
1602 {
1603 ++ncrn;
1604 gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
1605 }
1606 /* NREGS can be 0 when e.g. an empty structure is to be passed.
1607 A reg is still generated for it, but the caller should be smart
1608 enough not to use it. */
1609 if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
1610 {
1611 pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
1612 }
1613 else
1614 {
1615 rtx par;
1616 int i;
1617
1618 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
1619 for (i = 0; i < nregs; i++)
1620 {
1621 rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
1622 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
1623 GEN_INT (i * UNITS_PER_WORD));
1624 XVECEXP (par, 0, i) = tmp;
1625 }
1626 pcum->aapcs_reg = par;
1627 }
1628
1629 pcum->aapcs_nextncrn = ncrn + nregs;
1630 return;
1631 }
1632
1633 /* C.11 */
1634 pcum->aapcs_nextncrn = NUM_ARG_REGS;
1635
1636   /* The argument is passed on the stack; record the needed number of words for
1637 this argument and align the total size if necessary. */
1638 on_stack:
1639 pcum->aapcs_stack_words = size / UNITS_PER_WORD;
1640 if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
1641 pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
1642 16 / UNITS_PER_WORD);
1643 return;
1644 }
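/* Worked example of rule C.8 above (assuming TImode has its usual 16-byte
   alignment): after one int argument has consumed x0, passing an __int128
   needs nregs == 2 and a 16-byte alignment, so NCRN is bumped from the odd
   value 1 to 2 and the __int128 is passed in x2/x3, leaving x1 unused.  */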
1645
1646 /* Implement TARGET_FUNCTION_ARG. */
1647
1648 static rtx
1649 aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
1650 const_tree type, bool named)
1651 {
1652 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1653 gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
1654
1655 if (mode == VOIDmode)
1656 return NULL_RTX;
1657
1658 aarch64_layout_arg (pcum_v, mode, type, named);
1659 return pcum->aapcs_reg;
1660 }
1661
1662 void
1663 aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
1664 const_tree fntype ATTRIBUTE_UNUSED,
1665 rtx libname ATTRIBUTE_UNUSED,
1666 const_tree fndecl ATTRIBUTE_UNUSED,
1667 unsigned n_named ATTRIBUTE_UNUSED)
1668 {
1669 pcum->aapcs_ncrn = 0;
1670 pcum->aapcs_nvrn = 0;
1671 pcum->aapcs_nextncrn = 0;
1672 pcum->aapcs_nextnvrn = 0;
1673 pcum->pcs_variant = ARM_PCS_AAPCS64;
1674 pcum->aapcs_reg = NULL_RTX;
1675 pcum->aapcs_arg_processed = false;
1676 pcum->aapcs_stack_words = 0;
1677 pcum->aapcs_stack_size = 0;
1678
1679 return;
1680 }
1681
1682 static void
1683 aarch64_function_arg_advance (cumulative_args_t pcum_v,
1684 enum machine_mode mode,
1685 const_tree type,
1686 bool named)
1687 {
1688 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1689 if (pcum->pcs_variant == ARM_PCS_AAPCS64)
1690 {
1691 aarch64_layout_arg (pcum_v, mode, type, named);
1692 gcc_assert ((pcum->aapcs_reg != NULL_RTX)
1693 != (pcum->aapcs_stack_words != 0));
1694 pcum->aapcs_arg_processed = false;
1695 pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
1696 pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
1697 pcum->aapcs_stack_size += pcum->aapcs_stack_words;
1698 pcum->aapcs_stack_words = 0;
1699 pcum->aapcs_reg = NULL_RTX;
1700 }
1701 }
1702
1703 bool
1704 aarch64_function_arg_regno_p (unsigned regno)
1705 {
1706 return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
1707 || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
1708 }
1709
1710 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
1711 PARM_BOUNDARY bits of alignment, but will be given anything up
1712 to STACK_BOUNDARY bits if the type requires it. This makes sure
1713 that both before and after the layout of each argument, the Next
1714 Stacked Argument Address (NSAA) will have a minimum alignment of
1715 8 bytes. */
1716
1717 static unsigned int
1718 aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
1719 {
1720 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
1721
1722 if (alignment < PARM_BOUNDARY)
1723 alignment = PARM_BOUNDARY;
1724 if (alignment > STACK_BOUNDARY)
1725 alignment = STACK_BOUNDARY;
1726 return alignment;
1727 }
1728
1729 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
1730
1731 Return true if an argument passed on the stack should be padded upwards,
1732 i.e. if the least-significant byte of the stack slot has useful data.
1733
1734 Small aggregate types are placed in the lowest memory address.
1735
1736 The related parameter passing rules are B.4, C.3, C.5 and C.14. */
1737
1738 bool
1739 aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
1740 {
1741 /* On little-endian targets, the least significant byte of every stack
1742 argument is passed at the lowest byte address of the stack slot. */
1743 if (!BYTES_BIG_ENDIAN)
1744 return true;
1745
1746 /* Otherwise, integral, floating-point and pointer types are padded downward:
1747 the least significant byte of a stack argument is passed at the highest
1748 byte address of the stack slot. */
1749 if (type
1750 ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)
1751 || POINTER_TYPE_P (type))
1752 : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
1753 return false;
1754
1755 /* Everything else padded upward, i.e. data in first byte of stack slot. */
1756 return true;
1757 }
1758
1759 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
1760
1761    It specifies padding for the last (possibly the only)
1762    element of a block move between registers and memory.  Assuming
1763    the block is in memory, padding upward means that the last
1764    element is padded after its most significant byte, while with
1765    downward padding the last element is padded at its least
1766    significant byte side.
1767
1768 Small aggregates and small complex types are always padded
1769 upwards.
1770
1771 We don't need to worry about homogeneous floating-point or
1772 short-vector aggregates; their move is not affected by the
1773 padding direction determined here. Regardless of endianness,
1774 each element of such an aggregate is put in the least
1775 significant bits of a fp/simd register.
1776
1777 Return !BYTES_BIG_ENDIAN if the least significant byte of the
1778 register has useful data, and return the opposite if the most
1779 significant byte does. */
1780
1781 bool
1782 aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
1783 bool first ATTRIBUTE_UNUSED)
1784 {
1785
1786 /* Small composite types are always padded upward. */
1787 if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
1788 {
1789 HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
1790 : GET_MODE_SIZE (mode));
1791 if (size < 2 * UNITS_PER_WORD)
1792 return true;
1793 }
1794
1795 /* Otherwise, use the default padding. */
1796 return !BYTES_BIG_ENDIAN;
1797 }
1798
1799 static enum machine_mode
1800 aarch64_libgcc_cmp_return_mode (void)
1801 {
1802 return SImode;
1803 }
1804
1805 static bool
1806 aarch64_frame_pointer_required (void)
1807 {
1808 /* If the function contains dynamic stack allocations, we need to
1809 use the frame pointer to access the static parts of the frame. */
1810 if (cfun->calls_alloca)
1811 return true;
1812
1813 /* In aarch64_override_options_after_change
1814 flag_omit_leaf_frame_pointer turns off the frame pointer by
1815 default. Turn it back on now if we've not got a leaf
1816 function. */
1817 if (flag_omit_leaf_frame_pointer
1818 && (!crtl->is_leaf || df_regs_ever_live_p (LR_REGNUM)))
1819 return true;
1820
1821 return false;
1822 }
1823
1824 /* Mark the registers that need to be saved by the callee and calculate
1825 the size of the callee-saved registers area and frame record (both FP
1826 and LR may be omitted). */
1827 static void
1828 aarch64_layout_frame (void)
1829 {
1830 HOST_WIDE_INT offset = 0;
1831 int regno;
1832
1833 if (reload_completed && cfun->machine->frame.laid_out)
1834 return;
1835
1836 #define SLOT_NOT_REQUIRED (-2)
1837 #define SLOT_REQUIRED (-1)
1838
1839 cfun->machine->frame.wb_candidate1 = FIRST_PSEUDO_REGISTER;
1840 cfun->machine->frame.wb_candidate2 = FIRST_PSEUDO_REGISTER;
1841
1842 /* First mark all the registers that really need to be saved... */
1843 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
1844 cfun->machine->frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
1845
1846 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1847 cfun->machine->frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
1848
1849 /* ... that includes the eh data registers (if needed)... */
1850 if (crtl->calls_eh_return)
1851 for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
1852 cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)]
1853 = SLOT_REQUIRED;
1854
1855 /* ... and any callee saved register that dataflow says is live. */
1856 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
1857 if (df_regs_ever_live_p (regno)
1858 && !call_used_regs[regno])
1859 cfun->machine->frame.reg_offset[regno] = SLOT_REQUIRED;
1860
1861 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1862 if (df_regs_ever_live_p (regno)
1863 && !call_used_regs[regno])
1864 cfun->machine->frame.reg_offset[regno] = SLOT_REQUIRED;
1865
1866 if (frame_pointer_needed)
1867 {
1868 /* FP and LR are placed in the linkage record. */
1869 cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
1870 cfun->machine->frame.wb_candidate1 = R29_REGNUM;
1871 cfun->machine->frame.reg_offset[R30_REGNUM] = UNITS_PER_WORD;
1872 cfun->machine->frame.wb_candidate2 = R30_REGNUM;
1873 cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
1874 offset += 2 * UNITS_PER_WORD;
1875 }
1876
1877 /* Now assign stack slots for them. */
1878 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
1879 if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
1880 {
1881 cfun->machine->frame.reg_offset[regno] = offset;
1882 if (cfun->machine->frame.wb_candidate1 == FIRST_PSEUDO_REGISTER)
1883 cfun->machine->frame.wb_candidate1 = regno;
1884 else if (cfun->machine->frame.wb_candidate2 == FIRST_PSEUDO_REGISTER)
1885 cfun->machine->frame.wb_candidate2 = regno;
1886 offset += UNITS_PER_WORD;
1887 }
1888
1889 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
1890 if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
1891 {
1892 cfun->machine->frame.reg_offset[regno] = offset;
1893 if (cfun->machine->frame.wb_candidate1 == FIRST_PSEUDO_REGISTER)
1894 cfun->machine->frame.wb_candidate1 = regno;
1895 else if (cfun->machine->frame.wb_candidate2 == FIRST_PSEUDO_REGISTER
1896 && cfun->machine->frame.wb_candidate1 >= V0_REGNUM)
1897 cfun->machine->frame.wb_candidate2 = regno;
1898 offset += UNITS_PER_WORD;
1899 }
1900
1901 cfun->machine->frame.padding0 =
1902 (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
1903 offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
1904
1905 cfun->machine->frame.saved_regs_size = offset;
1906
1907 cfun->machine->frame.hard_fp_offset
1908 = AARCH64_ROUND_UP (cfun->machine->frame.saved_varargs_size
1909 + get_frame_size ()
1910 + cfun->machine->frame.saved_regs_size,
1911 STACK_BOUNDARY / BITS_PER_UNIT);
1912
1913 cfun->machine->frame.frame_size
1914 = AARCH64_ROUND_UP (cfun->machine->frame.hard_fp_offset
1915 + crtl->outgoing_args_size,
1916 STACK_BOUNDARY / BITS_PER_UNIT);
1917
1918 cfun->machine->frame.laid_out = true;
1919 }
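/* As an illustrative example of the layout above (the register set and
   sizes are assumptions, not taken from any particular function): for a
   non-leaf function that needs a frame pointer and also has to save
   x19, x20 and d8, the loops assign, with UNITS_PER_WORD == 8 and a
   16-byte STACK_BOUNDARY:

       reg_offset[R29] = 0    (wb_candidate1 = R29)
       reg_offset[R30] = 8    (wb_candidate2 = R30)
       reg_offset[R19] = 16
       reg_offset[R20] = 24
       reg_offset[V8]  = 32

   giving offset = 40, padding0 = 8 and saved_regs_size = 48.  */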
1920
1921 /* Make the last instruction frame-related and note that it performs
1922 the operation described by FRAME_PATTERN. */
1923
1924 static void
1925 aarch64_set_frame_expr (rtx frame_pattern)
1926 {
1927 rtx_insn *insn;
1928
1929 insn = get_last_insn ();
1930 RTX_FRAME_RELATED_P (insn) = 1;
1931 RTX_FRAME_RELATED_P (frame_pattern) = 1;
1932 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
1933 frame_pattern,
1934 REG_NOTES (insn));
1935 }
1936
1937 static bool
1938 aarch64_register_saved_on_entry (int regno)
1939 {
1940 return cfun->machine->frame.reg_offset[regno] >= 0;
1941 }
1942
1943 static unsigned
1944 aarch64_next_callee_save (unsigned regno, unsigned limit)
1945 {
1946 while (regno <= limit && !aarch64_register_saved_on_entry (regno))
1947 regno ++;
1948 return regno;
1949 }
1950
1951 static void
1952 aarch64_pushwb_single_reg (enum machine_mode mode, unsigned regno,
1953 HOST_WIDE_INT adjustment)
1954 {
1955 rtx base_rtx = stack_pointer_rtx;
1956 rtx insn, reg, mem;
1957
1958 reg = gen_rtx_REG (mode, regno);
1959 mem = gen_rtx_PRE_MODIFY (Pmode, base_rtx,
1960 plus_constant (Pmode, base_rtx, -adjustment));
1961 mem = gen_rtx_MEM (mode, mem);
1962
1963 insn = emit_move_insn (mem, reg);
1964 RTX_FRAME_RELATED_P (insn) = 1;
1965 }
1966
1967 static void
1968 aarch64_popwb_single_reg (enum machine_mode mode, unsigned regno,
1969 HOST_WIDE_INT adjustment)
1970 {
1971 rtx base_rtx = stack_pointer_rtx;
1972 rtx insn, reg, mem;
1973
1974 reg = gen_rtx_REG (mode, regno);
1975 mem = gen_rtx_POST_MODIFY (Pmode, base_rtx,
1976 plus_constant (Pmode, base_rtx, adjustment));
1977 mem = gen_rtx_MEM (mode, mem);
1978
1979 insn = emit_move_insn (reg, mem);
1980 add_reg_note (insn, REG_CFA_RESTORE, reg);
1981 RTX_FRAME_RELATED_P (insn) = 1;
1982 }
1983
1984 static rtx
1985 aarch64_gen_storewb_pair (enum machine_mode mode, rtx base, rtx reg, rtx reg2,
1986 HOST_WIDE_INT adjustment)
1987 {
1988 switch (mode)
1989 {
1990 case DImode:
1991 return gen_storewb_pairdi_di (base, base, reg, reg2,
1992 GEN_INT (-adjustment),
1993 GEN_INT (UNITS_PER_WORD - adjustment));
1994 case DFmode:
1995 return gen_storewb_pairdf_di (base, base, reg, reg2,
1996 GEN_INT (-adjustment),
1997 GEN_INT (UNITS_PER_WORD - adjustment));
1998 default:
1999 gcc_unreachable ();
2000 }
2001 }
2002
2003 static void
2004 aarch64_pushwb_pair_reg (enum machine_mode mode, unsigned regno1,
2005 unsigned regno2, HOST_WIDE_INT adjustment)
2006 {
2007 rtx_insn *insn;
2008 rtx reg1 = gen_rtx_REG (mode, regno1);
2009 rtx reg2 = gen_rtx_REG (mode, regno2);
2010
2011 insn = emit_insn (aarch64_gen_storewb_pair (mode, stack_pointer_rtx, reg1,
2012 reg2, adjustment));
2013 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
2014
2015 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2016 RTX_FRAME_RELATED_P (insn) = 1;
2017 }
2018
2019 static rtx
2020 aarch64_gen_loadwb_pair (enum machine_mode mode, rtx base, rtx reg, rtx reg2,
2021 HOST_WIDE_INT adjustment)
2022 {
2023 switch (mode)
2024 {
2025 case DImode:
2026 return gen_loadwb_pairdi_di (base, base, reg, reg2, GEN_INT (adjustment),
2027 GEN_INT (UNITS_PER_WORD));
2028 case DFmode:
2029 return gen_loadwb_pairdf_di (base, base, reg, reg2, GEN_INT (adjustment),
2030 GEN_INT (UNITS_PER_WORD));
2031 default:
2032 gcc_unreachable ();
2033 }
2034 }
2035
2036 static void
2037 aarch64_popwb_pair_reg (enum machine_mode mode, unsigned regno1,
2038 unsigned regno2, HOST_WIDE_INT adjustment, rtx cfa)
2039 {
2040 rtx insn;
2041 rtx reg1 = gen_rtx_REG (mode, regno1);
2042 rtx reg2 = gen_rtx_REG (mode, regno2);
2043
2044 insn = emit_insn (aarch64_gen_loadwb_pair (mode, stack_pointer_rtx, reg1,
2045 reg2, adjustment));
2046 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
2047 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2048 RTX_FRAME_RELATED_P (insn) = 1;
2049
2050 if (cfa)
2051 add_reg_note (insn, REG_CFA_ADJUST_CFA,
2052 (gen_rtx_SET (Pmode, stack_pointer_rtx,
2053 plus_constant (Pmode, cfa, adjustment))));
2054
2055 add_reg_note (insn, REG_CFA_RESTORE, reg1);
2056 add_reg_note (insn, REG_CFA_RESTORE, reg2);
2057 }
2058
2059 static rtx
2060 aarch64_gen_store_pair (enum machine_mode mode, rtx mem1, rtx reg1, rtx mem2,
2061 rtx reg2)
2062 {
2063 switch (mode)
2064 {
2065 case DImode:
2066 return gen_store_pairdi (mem1, reg1, mem2, reg2);
2067
2068 case DFmode:
2069 return gen_store_pairdf (mem1, reg1, mem2, reg2);
2070
2071 default:
2072 gcc_unreachable ();
2073 }
2074 }
2075
2076 static rtx
2077 aarch64_gen_load_pair (enum machine_mode mode, rtx reg1, rtx mem1, rtx reg2,
2078 rtx mem2)
2079 {
2080 switch (mode)
2081 {
2082 case DImode:
2083 return gen_load_pairdi (reg1, mem1, reg2, mem2);
2084
2085 case DFmode:
2086 return gen_load_pairdf (reg1, mem1, reg2, mem2);
2087
2088 default:
2089 gcc_unreachable ();
2090 }
2091 }
2092
2093
2094 static void
2095 aarch64_save_callee_saves (enum machine_mode mode, HOST_WIDE_INT start_offset,
2096 unsigned start, unsigned limit, bool skip_wb)
2097 {
2098 rtx_insn *insn;
2099 rtx (*gen_mem_ref) (enum machine_mode, rtx) = (frame_pointer_needed
2100 ? gen_frame_mem : gen_rtx_MEM);
2101 unsigned regno;
2102 unsigned regno2;
2103
2104 for (regno = aarch64_next_callee_save (start, limit);
2105 regno <= limit;
2106 regno = aarch64_next_callee_save (regno + 1, limit))
2107 {
2108 rtx reg, mem;
2109 HOST_WIDE_INT offset;
2110
2111 if (skip_wb
2112 && (regno == cfun->machine->frame.wb_candidate1
2113 || regno == cfun->machine->frame.wb_candidate2))
2114 continue;
2115
2116 reg = gen_rtx_REG (mode, regno);
2117 offset = start_offset + cfun->machine->frame.reg_offset[regno];
2118 mem = gen_mem_ref (mode, plus_constant (Pmode, stack_pointer_rtx,
2119 offset));
2120
2121 regno2 = aarch64_next_callee_save (regno + 1, limit);
2122
2123 if (regno2 <= limit
2124 && ((cfun->machine->frame.reg_offset[regno] + UNITS_PER_WORD)
2125 == cfun->machine->frame.reg_offset[regno2]))
2126
2127 {
2128 rtx reg2 = gen_rtx_REG (mode, regno2);
2129 rtx mem2;
2130
2131 offset = start_offset + cfun->machine->frame.reg_offset[regno2];
2132 mem2 = gen_mem_ref (mode, plus_constant (Pmode, stack_pointer_rtx,
2133 offset));
2134 insn = emit_insn (aarch64_gen_store_pair (mode, mem, reg, mem2,
2135 reg2));
2136
2137 /* The first part of a frame-related parallel insn is
2138 always assumed to be relevant to the frame
2139 calculations; subsequent parts are only
2140 frame-related if explicitly marked. */
2141 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2142 regno = regno2;
2143 }
2144 else
2145 insn = emit_move_insn (mem, reg);
2146
2147 RTX_FRAME_RELATED_P (insn) = 1;
2148 }
2149 }
2150
2151 static void
2152 aarch64_restore_callee_saves (enum machine_mode mode,
2153 HOST_WIDE_INT start_offset, unsigned start,
2154 unsigned limit, bool skip_wb)
2155 {
2156 rtx insn;
2157 rtx base_rtx = stack_pointer_rtx;
2158 rtx (*gen_mem_ref) (enum machine_mode, rtx) = (frame_pointer_needed
2159 ? gen_frame_mem : gen_rtx_MEM);
2160 unsigned regno;
2161 unsigned regno2;
2162 HOST_WIDE_INT offset;
2163
2164 for (regno = aarch64_next_callee_save (start, limit);
2165 regno <= limit;
2166 regno = aarch64_next_callee_save (regno + 1, limit))
2167 {
2168 rtx reg, mem;
2169
2170 if (skip_wb
2171 && (regno == cfun->machine->frame.wb_candidate1
2172 || regno == cfun->machine->frame.wb_candidate2))
2173 continue;
2174
2175 reg = gen_rtx_REG (mode, regno);
2176 offset = start_offset + cfun->machine->frame.reg_offset[regno];
2177 mem = gen_mem_ref (mode, plus_constant (Pmode, base_rtx, offset));
2178
2179 regno2 = aarch64_next_callee_save (regno + 1, limit);
2180
2181 if (regno2 <= limit
2182 && ((cfun->machine->frame.reg_offset[regno] + UNITS_PER_WORD)
2183 == cfun->machine->frame.reg_offset[regno2]))
2184 {
2185 rtx reg2 = gen_rtx_REG (mode, regno2);
2186 rtx mem2;
2187
2188 offset = start_offset + cfun->machine->frame.reg_offset[regno2];
2189 mem2 = gen_mem_ref (mode, plus_constant (Pmode, base_rtx, offset));
2190 insn = emit_insn (aarch64_gen_load_pair (mode, reg, mem, reg2,
2191 mem2));
2192 add_reg_note (insn, REG_CFA_RESTORE, reg);
2193 add_reg_note (insn, REG_CFA_RESTORE, reg2);
2194
2195 /* The first part of a frame-related parallel insn is
2196 always assumed to be relevant to the frame
2197 calculations; subsequent parts are only
2198 frame-related if explicitly marked. */
2199 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
2200 regno = regno2;
2201 }
2202 else
2203 {
2204 insn = emit_move_insn (reg, mem);
2205 add_reg_note (insn, REG_CFA_RESTORE, reg);
2206 }
2207
2208 RTX_FRAME_RELATED_P (insn) = 1;
2209 }
2210 }
2211
2212 /* AArch64 stack frames generated by this compiler look like:
2213
2214 +-------------------------------+
2215 | |
2216 | incoming stack arguments |
2217 | |
2218 +-------------------------------+
2219 | | <-- incoming stack pointer (aligned)
2220 | callee-allocated save area |
2221 | for register varargs |
2222 | |
2223 +-------------------------------+
2224 | local variables | <-- frame_pointer_rtx
2225 | |
2226 +-------------------------------+
2227 | padding0 | \
2228 +-------------------------------+ |
2229 | callee-saved registers | | frame.saved_regs_size
2230 +-------------------------------+ |
2231 | LR' | |
2232 +-------------------------------+ |
2233 | FP' | / <- hard_frame_pointer_rtx (aligned)
2234 +-------------------------------+
2235 | dynamic allocation |
2236 +-------------------------------+
2237 | padding |
2238 +-------------------------------+
2239 | outgoing stack arguments | <-- arg_pointer
2240 | |
2241 +-------------------------------+
2242 | | <-- stack_pointer_rtx (aligned)
2243
2244 Dynamic stack allocations via alloca() decrease stack_pointer_rtx
2245 but leave frame_pointer_rtx and hard_frame_pointer_rtx
2246 unchanged. */
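/* Continuing the example used after aarch64_layout_frame (48 bytes of
   callee saves, 16 bytes of locals, no varargs save area, 32 bytes of
   outgoing arguments -- all assumed figures), the fields above work
   out as hard_fp_offset = 16 + 48 = 64 and frame_size = 64 + 32 = 96,
   so the prologue's fp_offset (frame_size - hard_fp_offset) is 32.  */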
2247
2248 /* Generate the prologue instructions for entry into a function.
2249 Establish the stack frame by decreasing the stack pointer by the
2250 calculated frame size and, if necessary, create a frame record
2251 filled with the values of LR and the previous frame pointer. The
2252 current FP is also set up if it is in use. */
2253
2254 void
2255 aarch64_expand_prologue (void)
2256 {
2257 /* sub sp, sp, #<frame_size>
2258 stp {fp, lr}, [sp, #<frame_size> - 16]
2259 add fp, sp, #<frame_size> - hardfp_offset
2260 stp {cs_reg}, [fp, #-16] etc.
2261
2262 sub sp, sp, <final_adjustment_if_any>
2263 */
2264 HOST_WIDE_INT frame_size, offset;
2265 HOST_WIDE_INT fp_offset; /* Offset from hard FP to SP. */
2266 rtx_insn *insn;
2267
2268 aarch64_layout_frame ();
2269
2270 if (flag_stack_usage_info)
2271 current_function_static_stack_size = cfun->machine->frame.frame_size;
2272
2273 frame_size = cfun->machine->frame.frame_size;
2274 offset = cfun->machine->frame.frame_size;
2275
2276 fp_offset = cfun->machine->frame.frame_size
2277 - cfun->machine->frame.hard_fp_offset;
2278
2279 /* Store pairs and load pairs have an offset range of only -512 to 504. */
2280 if (offset >= 512)
2281 {
2282 /* When the frame is large, an initial decrease of the stack pointer
2283 is done to skip over the callee-allocated save area for register
2284 varargs, the local variable area and/or the callee-saved register
2285 area. This allows the pre-index write-back store-pair
2286 instructions to be used for setting up the stack frame
2287 efficiently. */
2288 offset = cfun->machine->frame.hard_fp_offset;
2289 if (offset >= 512)
2290 offset = cfun->machine->frame.saved_regs_size;
2291
2292 frame_size -= (offset + crtl->outgoing_args_size);
2293 fp_offset = 0;
2294
2295 if (frame_size >= 0x1000000)
2296 {
2297 rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2298 emit_move_insn (op0, GEN_INT (-frame_size));
2299 emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
2300 aarch64_set_frame_expr (gen_rtx_SET
2301 (Pmode, stack_pointer_rtx,
2302 plus_constant (Pmode,
2303 stack_pointer_rtx,
2304 -frame_size)));
2305 }
2306 else if (frame_size > 0)
2307 {
2308 if ((frame_size & 0xfff) != frame_size)
2309 {
2310 insn = emit_insn (gen_add2_insn
2311 (stack_pointer_rtx,
2312 GEN_INT (-(frame_size
2313 & ~(HOST_WIDE_INT)0xfff))));
2314 RTX_FRAME_RELATED_P (insn) = 1;
2315 }
2316 if ((frame_size & 0xfff) != 0)
2317 {
2318 insn = emit_insn (gen_add2_insn
2319 (stack_pointer_rtx,
2320 GEN_INT (-(frame_size
2321 & (HOST_WIDE_INT)0xfff))));
2322 RTX_FRAME_RELATED_P (insn) = 1;
2323 }
2324 }
2325 }
2326 else
2327 frame_size = -1;
2328
2329 if (offset > 0)
2330 {
2331 bool skip_wb = false;
2332
2333 if (frame_pointer_needed)
2334 {
2335 skip_wb = true;
2336
2337 if (fp_offset)
2338 {
2339 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2340 GEN_INT (-offset)));
2341 RTX_FRAME_RELATED_P (insn) = 1;
2342 aarch64_set_frame_expr (gen_rtx_SET
2343 (Pmode, stack_pointer_rtx,
2344 gen_rtx_MINUS (Pmode, stack_pointer_rtx,
2345 GEN_INT (offset))));
2346
2347 aarch64_save_callee_saves (DImode, fp_offset, R29_REGNUM,
2348 R30_REGNUM, false);
2349 }
2350 else
2351 aarch64_pushwb_pair_reg (DImode, R29_REGNUM, R30_REGNUM, offset);
2352
2353 /* Set up frame pointer to point to the location of the
2354 previous frame pointer on the stack. */
2355 insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
2356 stack_pointer_rtx,
2357 GEN_INT (fp_offset)));
2358 aarch64_set_frame_expr (gen_rtx_SET
2359 (Pmode, hard_frame_pointer_rtx,
2360 plus_constant (Pmode,
2361 stack_pointer_rtx,
2362 fp_offset)));
2363 RTX_FRAME_RELATED_P (insn) = 1;
2364 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
2365 hard_frame_pointer_rtx));
2366 }
2367 else
2368 {
2369 unsigned reg1 = cfun->machine->frame.wb_candidate1;
2370 unsigned reg2 = cfun->machine->frame.wb_candidate2;
2371
2372 if (fp_offset
2373 || reg1 == FIRST_PSEUDO_REGISTER
2374 || (reg2 == FIRST_PSEUDO_REGISTER
2375 && offset >= 256))
2376 {
2377 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2378 GEN_INT (-offset)));
2379 RTX_FRAME_RELATED_P (insn) = 1;
2380 }
2381 else
2382 {
2383 enum machine_mode mode1 = (reg1 <= R30_REGNUM) ? DImode : DFmode;
2384
2385 skip_wb = true;
2386
2387 if (reg2 == FIRST_PSEUDO_REGISTER)
2388 aarch64_pushwb_single_reg (mode1, reg1, offset);
2389 else
2390 aarch64_pushwb_pair_reg (mode1, reg1, reg2, offset);
2391 }
2392 }
2393
2394 aarch64_save_callee_saves (DImode, fp_offset, R0_REGNUM, R30_REGNUM,
2395 skip_wb);
2396 aarch64_save_callee_saves (DFmode, fp_offset, V0_REGNUM, V31_REGNUM,
2397 skip_wb);
2398 }
2399
2400 /* when offset >= 512,
2401 sub sp, sp, #<outgoing_args_size> */
2402 if (frame_size > -1)
2403 {
2404 if (crtl->outgoing_args_size > 0)
2405 {
2406 insn = emit_insn (gen_add2_insn
2407 (stack_pointer_rtx,
2408 GEN_INT (- crtl->outgoing_args_size)));
2409 RTX_FRAME_RELATED_P (insn) = 1;
2410 }
2411 }
2412 }
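/* For the small assumed frame used in the examples above
   (frame_size == 96, hard_fp_offset == 64, fp_offset == 32, callee
   saves x19, x20 and d8), the sketch at the top of this function
   expands to roughly:

       sub  sp, sp, #96
       stp  x29, x30, [sp, #32]
       add  x29, sp, #32
       stp  x19, x20, [sp, #48]
       str  d8, [sp, #64]

   Frames of 512 bytes or more instead take the initial-adjustment
   path above and end with a separate sub for the outgoing-argument
   area.  */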
2413
2414 /* Generate the epilogue instructions for returning from a function. */
2415 void
2416 aarch64_expand_epilogue (bool for_sibcall)
2417 {
2418 HOST_WIDE_INT frame_size, offset;
2419 HOST_WIDE_INT fp_offset;
2420 rtx_insn *insn;
2421 rtx cfa_reg;
2422
2423 aarch64_layout_frame ();
2424
2425 offset = frame_size = cfun->machine->frame.frame_size;
2426 fp_offset = cfun->machine->frame.frame_size
2427 - cfun->machine->frame.hard_fp_offset;
2428
2429 cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
2430
2431 /* Store pairs and load pairs have an offset range of only -512 to 504. */
2432 if (offset >= 512)
2433 {
2434 offset = cfun->machine->frame.hard_fp_offset;
2435 if (offset >= 512)
2436 offset = cfun->machine->frame.saved_regs_size;
2437
2438 frame_size -= (offset + crtl->outgoing_args_size);
2439 fp_offset = 0;
2440 if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
2441 {
2442 insn = emit_insn (gen_add2_insn
2443 (stack_pointer_rtx,
2444 GEN_INT (crtl->outgoing_args_size)));
2445 RTX_FRAME_RELATED_P (insn) = 1;
2446 }
2447 }
2448 else
2449 frame_size = -1;
2450
2451 /* If there were outgoing arguments or we've done dynamic stack
2452 allocation, then restore the stack pointer from the frame
2453 pointer. This is at most one insn and more efficient than using
2454 GCC's internal mechanism. */
2455 if (frame_pointer_needed
2456 && (crtl->outgoing_args_size || cfun->calls_alloca))
2457 {
2458 insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
2459 hard_frame_pointer_rtx,
2460 GEN_INT (0)));
2461 offset = offset - fp_offset;
2462 RTX_FRAME_RELATED_P (insn) = 1;
2463 /* As SP is set to (FP - fp_offset), according to the rules in
2464 dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
2465 from the value of SP from now on. */
2466 cfa_reg = stack_pointer_rtx;
2467 }
2468
2469 if (offset > 0)
2470 {
2471 unsigned reg1 = cfun->machine->frame.wb_candidate1;
2472 unsigned reg2 = cfun->machine->frame.wb_candidate2;
2473 bool skip_wb = true;
2474
2475 if (frame_pointer_needed)
2476 fp_offset = 0;
2477 else if (fp_offset
2478 || reg1 == FIRST_PSEUDO_REGISTER
2479 || (reg2 == FIRST_PSEUDO_REGISTER
2480 && offset >= 256))
2481 skip_wb = false;
2482
2483 aarch64_restore_callee_saves (DImode, fp_offset, R0_REGNUM, R30_REGNUM,
2484 skip_wb);
2485 aarch64_restore_callee_saves (DFmode, fp_offset, V0_REGNUM, V31_REGNUM,
2486 skip_wb);
2487
2488 if (skip_wb)
2489 {
2490 enum machine_mode mode1 = (reg1 <= R30_REGNUM) ? DImode : DFmode;
2491
2492 if (reg2 == FIRST_PSEUDO_REGISTER)
2493 aarch64_popwb_single_reg (mode1, reg1, offset);
2494 else
2495 {
2496 if (reg1 != HARD_FRAME_POINTER_REGNUM)
2497 cfa_reg = NULL;
2498
2499 aarch64_popwb_pair_reg (mode1, reg1, reg2, offset, cfa_reg);
2500 }
2501 }
2502 else
2503 {
2504 insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
2505 GEN_INT (offset)));
2506 RTX_FRAME_RELATED_P (insn) = 1;
2507 }
2508 }
2509
2510 /* Stack adjustment for exception handler. */
2511 if (crtl->calls_eh_return)
2512 {
2513 /* We need to unwind the stack by the offset computed by
2514 EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
2515 based on SP. Ideally we would update the SP and define the
2516 CFA along the lines of:
2517
2518 SP = SP + EH_RETURN_STACKADJ_RTX
2519 (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
2520
2521 However the dwarf emitter only understands a constant
2522 register offset.
2523
2524 The solution chosen here is to use the otherwise unused IP0
2525 as a temporary register to hold the current SP value. The
2526 CFA is described using IP0 then SP is modified. */
2527
2528 rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
2529
2530 insn = emit_move_insn (ip0, stack_pointer_rtx);
2531 add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
2532 RTX_FRAME_RELATED_P (insn) = 1;
2533
2534 emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
2535
2536 /* Ensure the assignment to IP0 does not get optimized away. */
2537 emit_use (ip0);
2538 }
2539
2540 if (frame_size > -1)
2541 {
2542 if (frame_size >= 0x1000000)
2543 {
2544 rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2545 emit_move_insn (op0, GEN_INT (frame_size));
2546 emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
2547 aarch64_set_frame_expr (gen_rtx_SET
2548 (Pmode, stack_pointer_rtx,
2549 plus_constant (Pmode,
2550 stack_pointer_rtx,
2551 frame_size)));
2552 }
2553 else if (frame_size > 0)
2554 {
2555 if ((frame_size & 0xfff) != 0)
2556 {
2557 insn = emit_insn (gen_add2_insn
2558 (stack_pointer_rtx,
2559 GEN_INT ((frame_size
2560 & (HOST_WIDE_INT) 0xfff))));
2561 RTX_FRAME_RELATED_P (insn) = 1;
2562 }
2563 if ((frame_size & 0xfff) != frame_size)
2564 {
2565 insn = emit_insn (gen_add2_insn
2566 (stack_pointer_rtx,
2567 GEN_INT ((frame_size
2568 & ~ (HOST_WIDE_INT) 0xfff))));
2569 RTX_FRAME_RELATED_P (insn) = 1;
2570 }
2571 }
2572
2573 aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
2574 plus_constant (Pmode,
2575 stack_pointer_rtx,
2576 offset)));
2577 }
2578
2579 emit_use (gen_rtx_REG (DImode, LR_REGNUM));
2580 if (!for_sibcall)
2581 emit_jump_insn (ret_rtx);
2582 }
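/* For the same assumed small frame, the epilogue is roughly the
   mirror image of the prologue sketched earlier:

       add  sp, x29, #0
       ldp  x19, x20, [sp, #16]
       ldr  d8, [sp, #32]
       ldp  x29, x30, [sp], #64
       ret

   where the final load pair uses post-index write-back to pop the
   frame record and release the remaining 64 bytes in one step.  */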
2583
2584 /* Return the place to copy the exception unwinding return address to.
2585 This will probably be a stack slot, but could (in theory) be the
2586 return register. */
2587 rtx
2588 aarch64_final_eh_return_addr (void)
2589 {
2590 HOST_WIDE_INT fp_offset;
2591
2592 aarch64_layout_frame ();
2593
2594 fp_offset = cfun->machine->frame.frame_size
2595 - cfun->machine->frame.hard_fp_offset;
2596
2597 if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
2598 return gen_rtx_REG (DImode, LR_REGNUM);
2599
2600 /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
2601 result in a store to save LR introduced by builtin_eh_return () being
2602 incorrectly deleted because the alias is not detected.
2603 So in the calculation of the address to copy the exception unwinding
2604 return address to, we consider two cases.
2605 If FP is needed and the fp_offset is 0, it means that SP = FP and hence
2606 we return a SP-relative location since all the addresses are SP-relative
2607 in this case. This prevents the store from being optimized away.
2608 If the fp_offset is not 0, then the addresses will be FP-relative and
2609 therefore we return a FP-relative location. */
2610
2611 if (frame_pointer_needed)
2612 {
2613 if (fp_offset)
2614 return gen_frame_mem (DImode,
2615 plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
2616 else
2617 return gen_frame_mem (DImode,
2618 plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
2619 }
2620
2621 /* If FP is not needed, we calculate the location of LR, which would be
2622 at the top of the saved registers block. */
2623
2624 return gen_frame_mem (DImode,
2625 plus_constant (Pmode,
2626 stack_pointer_rtx,
2627 fp_offset
2628 + cfun->machine->frame.saved_regs_size
2629 - 2 * UNITS_PER_WORD));
2630 }
2631
2632 /* Possibly output code to build up a constant in a register. For
2633 the benefit of the costs infrastructure, returns the number of
2634 instructions which would be emitted. GENERATE inhibits or
2635 enables code generation. */
2636
2637 static int
2638 aarch64_build_constant (int regnum, HOST_WIDE_INT val, bool generate)
2639 {
2640 int insns = 0;
2641
2642 if (aarch64_bitmask_imm (val, DImode))
2643 {
2644 if (generate)
2645 emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val));
2646 insns = 1;
2647 }
2648 else
2649 {
2650 int i;
2651 int ncount = 0;
2652 int zcount = 0;
2653 HOST_WIDE_INT valp = val >> 16;
2654 HOST_WIDE_INT valm;
2655 HOST_WIDE_INT tval;
2656
2657 for (i = 16; i < 64; i += 16)
2658 {
2659 valm = (valp & 0xffff);
2660
2661 if (valm != 0)
2662 ++ zcount;
2663
2664 if (valm != 0xffff)
2665 ++ ncount;
2666
2667 valp >>= 16;
2668 }
2669
2670 /* zcount contains the number of additional MOVK instructions
2671 required if the constant is built up with an initial MOVZ instruction,
2672 while ncount is the number of MOVK instructions required if starting
2673 with a MOVN instruction. Choose whichever sequence needs fewer
2674 instructions, preferring the MOVZ-based sequence when the two counts
2675 are equal. */
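/* For example (values chosen purely for illustration), building
   0xdeadbeef gives zcount == 1 and ncount == 3, so the MOVZ-based
   sequence is used:
       movz  x0, #0xbeef
       movk  x0, #0xdead, lsl #16
   while 0xffffffffffff1234 gives ncount == 0 and is emitted as a
   single MOVN-class move.  (x0 stands for whichever register REGNUM
   names.)  */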
2676 if (ncount < zcount)
2677 {
2678 if (generate)
2679 emit_move_insn (gen_rtx_REG (Pmode, regnum),
2680 GEN_INT (val | ~(HOST_WIDE_INT) 0xffff));
2681 tval = 0xffff;
2682 insns++;
2683 }
2684 else
2685 {
2686 if (generate)
2687 emit_move_insn (gen_rtx_REG (Pmode, regnum),
2688 GEN_INT (val & 0xffff));
2689 tval = 0;
2690 insns++;
2691 }
2692
2693 val >>= 16;
2694
2695 for (i = 16; i < 64; i += 16)
2696 {
2697 if ((val & 0xffff) != tval)
2698 {
2699 if (generate)
2700 emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum),
2701 GEN_INT (i),
2702 GEN_INT (val & 0xffff)));
2703 insns++;
2704 }
2705 val >>= 16;
2706 }
2707 }
2708 return insns;
2709 }
2710
2711 static void
2712 aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta)
2713 {
2714 HOST_WIDE_INT mdelta = delta;
2715 rtx this_rtx = gen_rtx_REG (Pmode, regnum);
2716 rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg);
2717
2718 if (mdelta < 0)
2719 mdelta = -mdelta;
2720
2721 if (mdelta >= 4096 * 4096)
2722 {
2723 (void) aarch64_build_constant (scratchreg, delta, true);
2724 emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx));
2725 }
2726 else if (mdelta > 0)
2727 {
2728 if (mdelta >= 4096)
2729 {
2730 emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096)));
2731 rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12));
2732 if (delta < 0)
2733 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2734 gen_rtx_MINUS (Pmode, this_rtx, shift)));
2735 else
2736 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2737 gen_rtx_PLUS (Pmode, this_rtx, shift)));
2738 }
2739 if (mdelta % 4096 != 0)
2740 {
2741 scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096));
2742 emit_insn (gen_rtx_SET (Pmode, this_rtx,
2743 gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx)));
2744 }
2745 }
2746 }
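/* As a hypothetical illustration, aarch64_add_constant (R0_REGNUM,
   IP1_REGNUM, 5000) takes the mdelta >= 4096 path and emits roughly:
       mov   x17, #1
       add   x0, x0, x17, lsl #12     // + 4096
       add   x0, x0, #904             // + (5000 % 4096)
   while a delta of 4096 * 4096 or more is materialised via
   aarch64_build_constant and added with a single register-register
   add.  */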
2747
2748 /* Output code to add DELTA to the first argument, and then jump
2749 to FUNCTION. Used for C++ multiple inheritance. */
2750 static void
2751 aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
2752 HOST_WIDE_INT delta,
2753 HOST_WIDE_INT vcall_offset,
2754 tree function)
2755 {
2756 /* The this pointer is always in x0. Note that this differs from
2757 Arm where the this pointer may be bumped to r1 if r0 is required
2758 to return a pointer to an aggregate. On AArch64 a result value
2759 pointer will be in x8. */
2760 int this_regno = R0_REGNUM;
2761 rtx this_rtx, temp0, temp1, addr, funexp;
2762 rtx_insn *insn;
2763
2764 reload_completed = 1;
2765 emit_note (NOTE_INSN_PROLOGUE_END);
2766
2767 if (vcall_offset == 0)
2768 aarch64_add_constant (this_regno, IP1_REGNUM, delta);
2769 else
2770 {
2771 gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
2772
2773 this_rtx = gen_rtx_REG (Pmode, this_regno);
2774 temp0 = gen_rtx_REG (Pmode, IP0_REGNUM);
2775 temp1 = gen_rtx_REG (Pmode, IP1_REGNUM);
2776
2777 addr = this_rtx;
2778 if (delta != 0)
2779 {
2780 if (delta >= -256 && delta < 256)
2781 addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx,
2782 plus_constant (Pmode, this_rtx, delta));
2783 else
2784 aarch64_add_constant (this_regno, IP1_REGNUM, delta);
2785 }
2786
2787 if (Pmode == ptr_mode)
2788 aarch64_emit_move (temp0, gen_rtx_MEM (ptr_mode, addr));
2789 else
2790 aarch64_emit_move (temp0,
2791 gen_rtx_ZERO_EXTEND (Pmode,
2792 gen_rtx_MEM (ptr_mode, addr)));
2793
2794 if (vcall_offset >= -256 && vcall_offset < 4096 * POINTER_BYTES)
2795 addr = plus_constant (Pmode, temp0, vcall_offset);
2796 else
2797 {
2798 (void) aarch64_build_constant (IP1_REGNUM, vcall_offset, true);
2799 addr = gen_rtx_PLUS (Pmode, temp0, temp1);
2800 }
2801
2802 if (Pmode == ptr_mode)
2803 aarch64_emit_move (temp1, gen_rtx_MEM (ptr_mode,addr));
2804 else
2805 aarch64_emit_move (temp1,
2806 gen_rtx_SIGN_EXTEND (Pmode,
2807 gen_rtx_MEM (ptr_mode, addr)));
2808
2809 emit_insn (gen_add2_insn (this_rtx, temp1));
2810 }
2811
2812 /* Generate a tail call to the target function. */
2813 if (!TREE_USED (function))
2814 {
2815 assemble_external (function);
2816 TREE_USED (function) = 1;
2817 }
2818 funexp = XEXP (DECL_RTL (function), 0);
2819 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
2820 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
2821 SIBLING_CALL_P (insn) = 1;
2822
2823 insn = get_insns ();
2824 shorten_branches (insn);
2825 final_start_function (insn, file, 1);
2826 final (insn, file, 1);
2827 final_end_function ();
2828
2829 /* Stop pretending to be a post-reload pass. */
2830 reload_completed = 0;
2831 }
2832
2833 static int
2834 aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
2835 {
2836 if (GET_CODE (*x) == SYMBOL_REF)
2837 return SYMBOL_REF_TLS_MODEL (*x) != 0;
2838
2839 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2840 TLS offsets, not real symbol references. */
2841 if (GET_CODE (*x) == UNSPEC
2842 && XINT (*x, 1) == UNSPEC_TLS)
2843 return -1;
2844
2845 return 0;
2846 }
2847
2848 static bool
2849 aarch64_tls_referenced_p (rtx x)
2850 {
2851 if (!TARGET_HAVE_TLS)
2852 return false;
2853
2854 return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
2855 }
2856
2857
2858 static int
2859 aarch64_bitmasks_cmp (const void *i1, const void *i2)
2860 {
2861 const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
2862 const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
2863
2864 if (*imm1 < *imm2)
2865 return -1;
2866 if (*imm1 > *imm2)
2867 return +1;
2868 return 0;
2869 }
2870
2871
2872 static void
2873 aarch64_build_bitmask_table (void)
2874 {
2875 unsigned HOST_WIDE_INT mask, imm;
2876 unsigned int log_e, e, s, r;
2877 unsigned int nimms = 0;
2878
2879 for (log_e = 1; log_e <= 6; log_e++)
2880 {
2881 e = 1 << log_e;
2882 if (e == 64)
2883 mask = ~(HOST_WIDE_INT) 0;
2884 else
2885 mask = ((HOST_WIDE_INT) 1 << e) - 1;
2886 for (s = 1; s < e; s++)
2887 {
2888 for (r = 0; r < e; r++)
2889 {
2890 /* set s consecutive bits to 1 (s < 64) */
2891 imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
2892 /* rotate right by r */
2893 if (r != 0)
2894 imm = ((imm >> r) | (imm << (e - r))) & mask;
2895 /* replicate the constant depending on SIMD size */
2896 switch (log_e) {
2897 case 1: imm |= (imm << 2);
2898 case 2: imm |= (imm << 4);
2899 case 3: imm |= (imm << 8);
2900 case 4: imm |= (imm << 16);
2901 case 5: imm |= (imm << 32);
2902 case 6:
2903 break;
2904 default:
2905 gcc_unreachable ();
2906 }
2907 gcc_assert (nimms < AARCH64_NUM_BITMASKS);
2908 aarch64_bitmasks[nimms++] = imm;
2909 }
2910 }
2911 }
2912
2913 gcc_assert (nimms == AARCH64_NUM_BITMASKS);
2914 qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
2915 aarch64_bitmasks_cmp);
2916 }
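/* For instance, the iteration with e == 8, s == 4 and r == 0 produces
   the byte pattern 0x0f, which the fall-through replication switch
   widens to 0x0f0f0f0f0f0f0f0f -- one of the AARCH64_NUM_BITMASKS
   entries that aarch64_bitmask_imm later finds by binary search.  */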
2917
2918
2919 /* Return true if val can be encoded as a 12-bit unsigned immediate with
2920 a left shift of 0 or 12 bits. */
2921 bool
2922 aarch64_uimm12_shift (HOST_WIDE_INT val)
2923 {
2924 return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
2925 || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
2926 );
2927 }
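/* E.g. 0xabc and 0xabc000 both pass (shift 0 and shift 12
   respectively), while 0xfff001 fails because its set bits straddle
   the two 12-bit fields.  */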
2928
2929
2930 /* Return true if val is an immediate that can be loaded into a
2931 register by a MOVZ instruction. */
2932 static bool
2933 aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
2934 {
2935 if (GET_MODE_SIZE (mode) > 4)
2936 {
2937 if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
2938 || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
2939 return 1;
2940 }
2941 else
2942 {
2943 /* Ignore sign extension. */
2944 val &= (HOST_WIDE_INT) 0xffffffff;
2945 }
2946 return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
2947 || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
2948 }
2949
2950
2951 /* Return true if val is a valid bitmask immediate. */
2952 bool
2953 aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
2954 {
2955 if (GET_MODE_SIZE (mode) < 8)
2956 {
2957 /* Replicate bit pattern. */
2958 val &= (HOST_WIDE_INT) 0xffffffff;
2959 val |= val << 32;
2960 }
2961 return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
2962 sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
2963 }
2964
2965
2966 /* Return true if val is an immediate that can be loaded into a
2967 register in a single instruction. */
2968 bool
2969 aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
2970 {
2971 if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
2972 return 1;
2973 return aarch64_bitmask_imm (val, mode);
2974 }
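/* A few illustrative cases: 0xffff0000 is accepted through
   aarch64_movw_imm (a MOVZ of 0xffff shifted left by 16);
   0x0000ffffffff0000 fails the MOVZ/MOVN tests but is a valid bitmask
   immediate, so it is still a single-instruction move; 0x12345678
   fails both and needs a multi-instruction MOVZ/MOVK sequence
   instead.  */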
2975
2976 static bool
2977 aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2978 {
2979 rtx base, offset;
2980
2981 if (GET_CODE (x) == HIGH)
2982 return true;
2983
2984 split_const (x, &base, &offset);
2985 if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
2986 {
2987 if (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR)
2988 != SYMBOL_FORCE_TO_MEM)
2989 return true;
2990 else
2991 /* Avoid generating a 64-bit relocation in ILP32; leave
2992 to aarch64_expand_mov_immediate to handle it properly. */
2993 return mode != ptr_mode;
2994 }
2995
2996 return aarch64_tls_referenced_p (x);
2997 }
2998
2999 /* Return true if register REGNO is a valid index register.
3000 STRICT_P is true if REG_OK_STRICT is in effect. */
3001
3002 bool
3003 aarch64_regno_ok_for_index_p (int regno, bool strict_p)
3004 {
3005 if (!HARD_REGISTER_NUM_P (regno))
3006 {
3007 if (!strict_p)
3008 return true;
3009
3010 if (!reg_renumber)
3011 return false;
3012
3013 regno = reg_renumber[regno];
3014 }
3015 return GP_REGNUM_P (regno);
3016 }
3017
3018 /* Return true if register REGNO is a valid base register.
3019 STRICT_P is true if REG_OK_STRICT is in effect. */
3020
3021 bool
3022 aarch64_regno_ok_for_base_p (int regno, bool strict_p)
3023 {
3024 if (!HARD_REGISTER_NUM_P (regno))
3025 {
3026 if (!strict_p)
3027 return true;
3028
3029 if (!reg_renumber)
3030 return false;
3031
3032 regno = reg_renumber[regno];
3033 }
3034
3035 /* The fake registers will be eliminated to either the stack or
3036 hard frame pointer, both of which are usually valid base registers.
3037 Reload deals with the cases where the eliminated form isn't valid. */
3038 return (GP_REGNUM_P (regno)
3039 || regno == SP_REGNUM
3040 || regno == FRAME_POINTER_REGNUM
3041 || regno == ARG_POINTER_REGNUM);
3042 }
3043
3044 /* Return true if X is a valid base register.
3045 STRICT_P is true if REG_OK_STRICT is in effect. */
3046
3047 static bool
3048 aarch64_base_register_rtx_p (rtx x, bool strict_p)
3049 {
3050 if (!strict_p && GET_CODE (x) == SUBREG)
3051 x = SUBREG_REG (x);
3052
3053 return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
3054 }
3055
3056 /* Return true if X is a valid address index. If it is, fill in INFO
3057 appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
3058
3059 static bool
3060 aarch64_classify_index (struct aarch64_address_info *info, rtx x,
3061 enum machine_mode mode, bool strict_p)
3062 {
3063 enum aarch64_address_type type;
3064 rtx index;
3065 int shift;
3066
3067 /* (reg:P) */
3068 if ((REG_P (x) || GET_CODE (x) == SUBREG)
3069 && GET_MODE (x) == Pmode)
3070 {
3071 type = ADDRESS_REG_REG;
3072 index = x;
3073 shift = 0;
3074 }
3075 /* (sign_extend:DI (reg:SI)) */
3076 else if ((GET_CODE (x) == SIGN_EXTEND
3077 || GET_CODE (x) == ZERO_EXTEND)
3078 && GET_MODE (x) == DImode
3079 && GET_MODE (XEXP (x, 0)) == SImode)
3080 {
3081 type = (GET_CODE (x) == SIGN_EXTEND)
3082 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3083 index = XEXP (x, 0);
3084 shift = 0;
3085 }
3086 /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
3087 else if (GET_CODE (x) == MULT
3088 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
3089 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
3090 && GET_MODE (XEXP (x, 0)) == DImode
3091 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
3092 && CONST_INT_P (XEXP (x, 1)))
3093 {
3094 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
3095 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3096 index = XEXP (XEXP (x, 0), 0);
3097 shift = exact_log2 (INTVAL (XEXP (x, 1)));
3098 }
3099 /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
3100 else if (GET_CODE (x) == ASHIFT
3101 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
3102 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
3103 && GET_MODE (XEXP (x, 0)) == DImode
3104 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
3105 && CONST_INT_P (XEXP (x, 1)))
3106 {
3107 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
3108 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3109 index = XEXP (XEXP (x, 0), 0);
3110 shift = INTVAL (XEXP (x, 1));
3111 }
3112 /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
3113 else if ((GET_CODE (x) == SIGN_EXTRACT
3114 || GET_CODE (x) == ZERO_EXTRACT)
3115 && GET_MODE (x) == DImode
3116 && GET_CODE (XEXP (x, 0)) == MULT
3117 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3118 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
3119 {
3120 type = (GET_CODE (x) == SIGN_EXTRACT)
3121 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3122 index = XEXP (XEXP (x, 0), 0);
3123 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
3124 if (INTVAL (XEXP (x, 1)) != 32 + shift
3125 || INTVAL (XEXP (x, 2)) != 0)
3126 shift = -1;
3127 }
3128 /* (and:DI (mult:DI (reg:DI) (const_int scale))
3129 (const_int 0xffffffff<<shift)) */
3130 else if (GET_CODE (x) == AND
3131 && GET_MODE (x) == DImode
3132 && GET_CODE (XEXP (x, 0)) == MULT
3133 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3134 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3135 && CONST_INT_P (XEXP (x, 1)))
3136 {
3137 type = ADDRESS_REG_UXTW;
3138 index = XEXP (XEXP (x, 0), 0);
3139 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
3140 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
3141 shift = -1;
3142 }
3143 /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
3144 else if ((GET_CODE (x) == SIGN_EXTRACT
3145 || GET_CODE (x) == ZERO_EXTRACT)
3146 && GET_MODE (x) == DImode
3147 && GET_CODE (XEXP (x, 0)) == ASHIFT
3148 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3149 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
3150 {
3151 type = (GET_CODE (x) == SIGN_EXTRACT)
3152 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3153 index = XEXP (XEXP (x, 0), 0);
3154 shift = INTVAL (XEXP (XEXP (x, 0), 1));
3155 if (INTVAL (XEXP (x, 1)) != 32 + shift
3156 || INTVAL (XEXP (x, 2)) != 0)
3157 shift = -1;
3158 }
3159 /* (and:DI (ashift:DI (reg:DI) (const_int shift))
3160 (const_int 0xffffffff<<shift)) */
3161 else if (GET_CODE (x) == AND
3162 && GET_MODE (x) == DImode
3163 && GET_CODE (XEXP (x, 0)) == ASHIFT
3164 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3165 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3166 && CONST_INT_P (XEXP (x, 1)))
3167 {
3168 type = ADDRESS_REG_UXTW;
3169 index = XEXP (XEXP (x, 0), 0);
3170 shift = INTVAL (XEXP (XEXP (x, 0), 1));
3171 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
3172 shift = -1;
3173 }
3174 /* (mult:P (reg:P) (const_int scale)) */
3175 else if (GET_CODE (x) == MULT
3176 && GET_MODE (x) == Pmode
3177 && GET_MODE (XEXP (x, 0)) == Pmode
3178 && CONST_INT_P (XEXP (x, 1)))
3179 {
3180 type = ADDRESS_REG_REG;
3181 index = XEXP (x, 0);
3182 shift = exact_log2 (INTVAL (XEXP (x, 1)));
3183 }
3184 /* (ashift:P (reg:P) (const_int shift)) */
3185 else if (GET_CODE (x) == ASHIFT
3186 && GET_MODE (x) == Pmode
3187 && GET_MODE (XEXP (x, 0)) == Pmode
3188 && CONST_INT_P (XEXP (x, 1)))
3189 {
3190 type = ADDRESS_REG_REG;
3191 index = XEXP (x, 0);
3192 shift = INTVAL (XEXP (x, 1));
3193 }
3194 else
3195 return false;
3196
3197 if (GET_CODE (index) == SUBREG)
3198 index = SUBREG_REG (index);
3199
3200 if ((shift == 0
3201 || (shift > 0 && shift <= 3
3202 && (1 << shift) == GET_MODE_SIZE (mode)))
3203 && REG_P (index)
3204 && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
3205 {
3206 info->type = type;
3207 info->offset = index;
3208 info->shift = shift;
3209 return true;
3210 }
3211
3212 return false;
3213 }
3214
3215 bool
3216 aarch64_offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
3217 {
3218 return (offset >= -64 * GET_MODE_SIZE (mode)
3219 && offset < 64 * GET_MODE_SIZE (mode)
3220 && offset % GET_MODE_SIZE (mode) == 0);
3221 }
3222
3223 static inline bool
3224 offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
3225 HOST_WIDE_INT offset)
3226 {
3227 return offset >= -256 && offset < 256;
3228 }
3229
3230 static inline bool
3231 offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
3232 {
3233 return (offset >= 0
3234 && offset < 4096 * GET_MODE_SIZE (mode)
3235 && offset % GET_MODE_SIZE (mode) == 0);
3236 }
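/* For DImode these predicates give, for example: -512..504 in steps
   of 8 for the 7-bit signed scaled form (used below for load/store
   pairs), -256..255 for the 9-bit unscaled form, and 0..32760 in
   steps of 8 for the 12-bit unsigned scaled form.  */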
3237
3238 /* Return true if X is a valid address for machine mode MODE. If it is,
3239 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
3240 effect. OUTER_CODE is PARALLEL for a load/store pair. */
3241
3242 static bool
3243 aarch64_classify_address (struct aarch64_address_info *info,
3244 rtx x, enum machine_mode mode,
3245 RTX_CODE outer_code, bool strict_p)
3246 {
3247 enum rtx_code code = GET_CODE (x);
3248 rtx op0, op1;
3249 bool allow_reg_index_p =
3250 outer_code != PARALLEL && (GET_MODE_SIZE (mode) != 16
3251 || aarch64_vector_mode_supported_p (mode));
3252 /* Don't support anything other than POST_INC or REG addressing for
3253 AdvSIMD. */
3254 if (aarch64_vect_struct_mode_p (mode)
3255 && (code != POST_INC && code != REG))
3256 return false;
3257
3258 switch (code)
3259 {
3260 case REG:
3261 case SUBREG:
3262 info->type = ADDRESS_REG_IMM;
3263 info->base = x;
3264 info->offset = const0_rtx;
3265 return aarch64_base_register_rtx_p (x, strict_p);
3266
3267 case PLUS:
3268 op0 = XEXP (x, 0);
3269 op1 = XEXP (x, 1);
3270
3271 if (! strict_p
3272 && REG_P (op0)
3273 && (op0 == virtual_stack_vars_rtx
3274 || op0 == frame_pointer_rtx
3275 || op0 == arg_pointer_rtx)
3276 && CONST_INT_P (op1))
3277 {
3278 info->type = ADDRESS_REG_IMM;
3279 info->base = op0;
3280 info->offset = op1;
3281
3282 return true;
3283 }
3284
3285 if (GET_MODE_SIZE (mode) != 0
3286 && CONST_INT_P (op1)
3287 && aarch64_base_register_rtx_p (op0, strict_p))
3288 {
3289 HOST_WIDE_INT offset = INTVAL (op1);
3290
3291 info->type = ADDRESS_REG_IMM;
3292 info->base = op0;
3293 info->offset = op1;
3294
3295 /* TImode and TFmode values are allowed in both pairs of X
3296 registers and individual Q registers. The available
3297 address modes are:
3298 X,X: 7-bit signed scaled offset
3299 Q: 9-bit signed offset
3300 We conservatively require an offset representable in both modes.
3301 */
3302 if (mode == TImode || mode == TFmode)
3303 return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
3304 && offset_9bit_signed_unscaled_p (mode, offset));
3305
3306 if (outer_code == PARALLEL)
3307 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
3308 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
3309 else
3310 return (offset_9bit_signed_unscaled_p (mode, offset)
3311 || offset_12bit_unsigned_scaled_p (mode, offset));
3312 }
3313
3314 if (allow_reg_index_p)
3315 {
3316 /* Look for base + (scaled/extended) index register. */
3317 if (aarch64_base_register_rtx_p (op0, strict_p)
3318 && aarch64_classify_index (info, op1, mode, strict_p))
3319 {
3320 info->base = op0;
3321 return true;
3322 }
3323 if (aarch64_base_register_rtx_p (op1, strict_p)
3324 && aarch64_classify_index (info, op0, mode, strict_p))
3325 {
3326 info->base = op1;
3327 return true;
3328 }
3329 }
3330
3331 return false;
3332
3333 case POST_INC:
3334 case POST_DEC:
3335 case PRE_INC:
3336 case PRE_DEC:
3337 info->type = ADDRESS_REG_WB;
3338 info->base = XEXP (x, 0);
3339 info->offset = NULL_RTX;
3340 return aarch64_base_register_rtx_p (info->base, strict_p);
3341
3342 case POST_MODIFY:
3343 case PRE_MODIFY:
3344 info->type = ADDRESS_REG_WB;
3345 info->base = XEXP (x, 0);
3346 if (GET_CODE (XEXP (x, 1)) == PLUS
3347 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3348 && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
3349 && aarch64_base_register_rtx_p (info->base, strict_p))
3350 {
3351 HOST_WIDE_INT offset;
3352 info->offset = XEXP (XEXP (x, 1), 1);
3353 offset = INTVAL (info->offset);
3354
3355 /* TImode and TFmode values are allowed in both pairs of X
3356 registers and individual Q registers. The available
3357 address modes are:
3358 X,X: 7-bit signed scaled offset
3359 Q: 9-bit signed offset
3360 We conservatively require an offset representable in both modes.
3361 */
3362 if (mode == TImode || mode == TFmode)
3363 return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
3364 && offset_9bit_signed_unscaled_p (mode, offset));
3365
3366 if (outer_code == PARALLEL)
3367 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
3368 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
3369 else
3370 return offset_9bit_signed_unscaled_p (mode, offset);
3371 }
3372 return false;
3373
3374 case CONST:
3375 case SYMBOL_REF:
3376 case LABEL_REF:
3377 /* load literal: pc-relative constant pool entry. Only supported
3378 for SI mode or larger. */
3379 info->type = ADDRESS_SYMBOLIC;
3380 if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4)
3381 {
3382 rtx sym, addend;
3383
3384 split_const (x, &sym, &addend);
3385 return (GET_CODE (sym) == LABEL_REF
3386 || (GET_CODE (sym) == SYMBOL_REF
3387 && CONSTANT_POOL_ADDRESS_P (sym)));
3388 }
3389 return false;
3390
3391 case LO_SUM:
3392 info->type = ADDRESS_LO_SUM;
3393 info->base = XEXP (x, 0);
3394 info->offset = XEXP (x, 1);
3395 if (allow_reg_index_p
3396 && aarch64_base_register_rtx_p (info->base, strict_p))
3397 {
3398 rtx sym, offs;
3399 split_const (info->offset, &sym, &offs);
3400 if (GET_CODE (sym) == SYMBOL_REF
3401 && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
3402 == SYMBOL_SMALL_ABSOLUTE))
3403 {
3404 /* The symbol and offset must be aligned to the access size. */
3405 unsigned int align;
3406 unsigned int ref_size;
3407
3408 if (CONSTANT_POOL_ADDRESS_P (sym))
3409 align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
3410 else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
3411 {
3412 tree exp = SYMBOL_REF_DECL (sym);
3413 align = TYPE_ALIGN (TREE_TYPE (exp));
3414 align = CONSTANT_ALIGNMENT (exp, align);
3415 }
3416 else if (SYMBOL_REF_DECL (sym))
3417 align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
3418 else if (SYMBOL_REF_HAS_BLOCK_INFO_P (sym)
3419 && SYMBOL_REF_BLOCK (sym) != NULL)
3420 align = SYMBOL_REF_BLOCK (sym)->alignment;
3421 else
3422 align = BITS_PER_UNIT;
3423
3424 ref_size = GET_MODE_SIZE (mode);
3425 if (ref_size == 0)
3426 ref_size = GET_MODE_SIZE (DImode);
3427
3428 return ((INTVAL (offs) & (ref_size - 1)) == 0
3429 && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
3430 }
3431 }
3432 return false;
3433
3434 default:
3435 return false;
3436 }
3437 }
3438
3439 bool
3440 aarch64_symbolic_address_p (rtx x)
3441 {
3442 rtx offset;
3443
3444 split_const (x, &x, &offset);
3445 return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
3446 }
3447
3448 /* Classify the base of symbolic expression X, given that X appears in
3449 context CONTEXT. */
3450
3451 enum aarch64_symbol_type
3452 aarch64_classify_symbolic_expression (rtx x,
3453 enum aarch64_symbol_context context)
3454 {
3455 rtx offset;
3456
3457 split_const (x, &x, &offset);
3458 return aarch64_classify_symbol (x, context);
3459 }
3460
3461
3462 /* Return TRUE if X is a legitimate address for accessing memory in
3463 mode MODE. */
3464 static bool
3465 aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
3466 {
3467 struct aarch64_address_info addr;
3468
3469 return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
3470 }
3471
3472 /* Return TRUE if X is a legitimate address for accessing memory in
3473 mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
3474 pair operation. */
3475 bool
3476 aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
3477 RTX_CODE outer_code, bool strict_p)
3478 {
3479 struct aarch64_address_info addr;
3480
3481 return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
3482 }
3483
3484 /* Return TRUE if rtx X is immediate constant 0.0. */
3485 bool
3486 aarch64_float_const_zero_rtx_p (rtx x)
3487 {
3488 REAL_VALUE_TYPE r;
3489
3490 if (GET_MODE (x) == VOIDmode)
3491 return false;
3492
3493 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3494 if (REAL_VALUE_MINUS_ZERO (r))
3495 return !HONOR_SIGNED_ZEROS (GET_MODE (x));
3496 return REAL_VALUES_EQUAL (r, dconst0);
3497 }
3498
3499 /* Return the fixed registers used for condition codes. */
3500
3501 static bool
3502 aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3503 {
3504 *p1 = CC_REGNUM;
3505 *p2 = INVALID_REGNUM;
3506 return true;
3507 }
3508
3509 /* Emit call insn with PAT and do aarch64-specific handling. */
3510
3511 void
3512 aarch64_emit_call_insn (rtx pat)
3513 {
3514 rtx insn = emit_call_insn (pat);
3515
3516 rtx *fusage = &CALL_INSN_FUNCTION_USAGE (insn);
3517 clobber_reg (fusage, gen_rtx_REG (word_mode, IP0_REGNUM));
3518 clobber_reg (fusage, gen_rtx_REG (word_mode, IP1_REGNUM));
3519 }
3520
3521 enum machine_mode
3522 aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
3523 {
3524 /* All floating point compares return CCFP if it is an equality
3525 comparison, and CCFPE otherwise. */
3526 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3527 {
3528 switch (code)
3529 {
3530 case EQ:
3531 case NE:
3532 case UNORDERED:
3533 case ORDERED:
3534 case UNLT:
3535 case UNLE:
3536 case UNGT:
3537 case UNGE:
3538 case UNEQ:
3539 case LTGT:
3540 return CCFPmode;
3541
3542 case LT:
3543 case LE:
3544 case GT:
3545 case GE:
3546 return CCFPEmode;
3547
3548 default:
3549 gcc_unreachable ();
3550 }
3551 }
3552
3553 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
3554 && y == const0_rtx
3555 && (code == EQ || code == NE || code == LT || code == GE)
3556 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
3557 || GET_CODE (x) == NEG))
3558 return CC_NZmode;
3559
3560 /* A compare with a shifted operand. Because of canonicalization,
3561 the comparison will have to be swapped when we emit the assembly
3562 code. */
3563 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
3564 && (REG_P (y) || GET_CODE (y) == SUBREG)
3565 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
3566 || GET_CODE (x) == LSHIFTRT
3567 || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
3568 return CC_SWPmode;
3569
3570 /* Similarly for a negated operand, but we can only do this for
3571 equalities. */
3572 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
3573 && (REG_P (y) || GET_CODE (y) == SUBREG)
3574 && (code == EQ || code == NE)
3575 && GET_CODE (x) == NEG)
3576 return CC_Zmode;
3577
3578 /* A compare of a mode narrower than SI mode against zero can be done
3579 by extending the value in the comparison. */
3580 if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
3581 && y == const0_rtx)
3582 /* Only use sign-extension if we really need it. */
3583 return ((code == GT || code == GE || code == LE || code == LT)
3584 ? CC_SESWPmode : CC_ZESWPmode);
3585
3586 /* For everything else, return CCmode. */
3587 return CCmode;
3588 }
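/* Two examples of the selection above: (compare (plus:DI x y)
   (const_int 0)) under EQ yields CC_NZmode, so the addition and the
   comparison can typically be combined into a flag-setting ADDS;
   (compare (ashift:DI x (const_int 2)) (reg:DI y)) yields CC_SWPmode
   because the operands must be swapped when the assembly is
   emitted.  */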
3589
3590 static unsigned
3591 aarch64_get_condition_code (rtx x)
3592 {
3593 enum machine_mode mode = GET_MODE (XEXP (x, 0));
3594 enum rtx_code comp_code = GET_CODE (x);
3595
3596 if (GET_MODE_CLASS (mode) != MODE_CC)
3597 mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
3598
3599 switch (mode)
3600 {
3601 case CCFPmode:
3602 case CCFPEmode:
3603 switch (comp_code)
3604 {
3605 case GE: return AARCH64_GE;
3606 case GT: return AARCH64_GT;
3607 case LE: return AARCH64_LS;
3608 case LT: return AARCH64_MI;
3609 case NE: return AARCH64_NE;
3610 case EQ: return AARCH64_EQ;
3611 case ORDERED: return AARCH64_VC;
3612 case UNORDERED: return AARCH64_VS;
3613 case UNLT: return AARCH64_LT;
3614 case UNLE: return AARCH64_LE;
3615 case UNGT: return AARCH64_HI;
3616 case UNGE: return AARCH64_PL;
3617 default: gcc_unreachable ();
3618 }
3619 break;
3620
3621 case CCmode:
3622 switch (comp_code)
3623 {
3624 case NE: return AARCH64_NE;
3625 case EQ: return AARCH64_EQ;
3626 case GE: return AARCH64_GE;
3627 case GT: return AARCH64_GT;
3628 case LE: return AARCH64_LE;
3629 case LT: return AARCH64_LT;
3630 case GEU: return AARCH64_CS;
3631 case GTU: return AARCH64_HI;
3632 case LEU: return AARCH64_LS;
3633 case LTU: return AARCH64_CC;
3634 default: gcc_unreachable ();
3635 }
3636 break;
3637
3638 case CC_SWPmode:
3639 case CC_ZESWPmode:
3640 case CC_SESWPmode:
3641 switch (comp_code)
3642 {
3643 case NE: return AARCH64_NE;
3644 case EQ: return AARCH64_EQ;
3645 case GE: return AARCH64_LE;
3646 case GT: return AARCH64_LT;
3647 case LE: return AARCH64_GE;
3648 case LT: return AARCH64_GT;
3649 case GEU: return AARCH64_LS;
3650 case GTU: return AARCH64_CC;
3651 case LEU: return AARCH64_CS;
3652 case LTU: return AARCH64_HI;
3653 default: gcc_unreachable ();
3654 }
3655 break;
3656
3657 case CC_NZmode:
3658 switch (comp_code)
3659 {
3660 case NE: return AARCH64_NE;
3661 case EQ: return AARCH64_EQ;
3662 case GE: return AARCH64_PL;
3663 case LT: return AARCH64_MI;
3664 default: gcc_unreachable ();
3665 }
3666 break;
3667
3668 case CC_Zmode:
3669 switch (comp_code)
3670 {
3671 case NE: return AARCH64_NE;
3672 case EQ: return AARCH64_EQ;
3673 default: gcc_unreachable ();
3674 }
3675 break;
3676
3677 default:
3678 gcc_unreachable ();
3679 break;
3680 }
3681 }
3682
3683 static unsigned
3684 bit_count (unsigned HOST_WIDE_INT value)
3685 {
3686 unsigned count = 0;
3687
3688 while (value)
3689 {
3690 count++;
3691 value &= value - 1;
3692 }
3693
3694 return count;
3695 }
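/* E.g. bit_count (0xf0f0) == 8; this is the classic
   clear-the-lowest-set-bit loop, used by the '%P' output modifier
   below.  */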
3696
3697 void
3698 aarch64_print_operand (FILE *f, rtx x, char code)
3699 {
3700 switch (code)
3701 {
3702 /* An integer or symbol address without a preceding # sign. */
3703 case 'c':
3704 switch (GET_CODE (x))
3705 {
3706 case CONST_INT:
3707 fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
3708 break;
3709
3710 case SYMBOL_REF:
3711 output_addr_const (f, x);
3712 break;
3713
3714 case CONST:
3715 if (GET_CODE (XEXP (x, 0)) == PLUS
3716 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
3717 {
3718 output_addr_const (f, x);
3719 break;
3720 }
3721 /* Fall through. */
3722
3723 default:
3724 output_operand_lossage ("Unsupported operand for code '%c'", code);
3725 }
3726 break;
3727
3728 case 'e':
3729 /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
3730 {
3731 int n;
3732
3733 if (!CONST_INT_P (x)
3734 || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
3735 {
3736 output_operand_lossage ("invalid operand for '%%%c'", code);
3737 return;
3738 }
3739
3740 switch (n)
3741 {
3742 case 3:
3743 fputc ('b', f);
3744 break;
3745 case 4:
3746 fputc ('h', f);
3747 break;
3748 case 5:
3749 fputc ('w', f);
3750 break;
3751 default:
3752 output_operand_lossage ("invalid operand for '%%%c'", code);
3753 return;
3754 }
3755 }
3756 break;
3757
3758 case 'p':
3759 {
3760 int n;
3761
3762 /* Print N such that 2^N == X. */
3763 if (!CONST_INT_P (x) || (n = exact_log2 (INTVAL (x))) < 0)
3764 {
3765 output_operand_lossage ("invalid operand for '%%%c'", code);
3766 return;
3767 }
3768
3769 asm_fprintf (f, "%d", n);
3770 }
3771 break;
3772
3773 case 'P':
3774 /* Print the number of non-zero bits in X (a const_int). */
3775 if (!CONST_INT_P (x))
3776 {
3777 output_operand_lossage ("invalid operand for '%%%c'", code);
3778 return;
3779 }
3780
3781 asm_fprintf (f, "%u", bit_count (INTVAL (x)));
3782 break;
3783
3784 case 'H':
3785 /* Print the higher numbered register of a pair (TImode) of regs. */
3786 if (!REG_P (x) || !GP_REGNUM_P (REGNO (x) + 1))
3787 {
3788 output_operand_lossage ("invalid operand for '%%%c'", code);
3789 return;
3790 }
3791
3792 asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]);
3793 break;
3794
3795 case 'm':
3796 /* Print a condition (eq, ne, etc). */
3797
3798 /* CONST_TRUE_RTX means always -- that's the default. */
3799 if (x == const_true_rtx)
3800 return;
3801
3802 if (!COMPARISON_P (x))
3803 {
3804 output_operand_lossage ("invalid operand for '%%%c'", code);
3805 return;
3806 }
3807
3808 fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
3809 break;
3810
3811 case 'M':
3812 /* Print the inverse of a condition (eq <-> ne, etc). */
3813
3814 /* CONST_TRUE_RTX means never -- that's the default. */
3815 if (x == const_true_rtx)
3816 {
3817 fputs ("nv", f);
3818 return;
3819 }
3820
3821 if (!COMPARISON_P (x))
3822 {
3823 output_operand_lossage ("invalid operand for '%%%c'", code);
3824 return;
3825 }
3826
3827 fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
3828 (aarch64_get_condition_code (x))], f);
3829 break;
3830
3831 case 'b':
3832 case 'h':
3833 case 's':
3834 case 'd':
3835 case 'q':
3836 /* Print a scalar FP/SIMD register name. */
3837 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
3838 {
3839 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
3840 return;
3841 }
3842 asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM);
3843 break;
3844
3845 case 'S':
3846 case 'T':
3847 case 'U':
3848 case 'V':
3849 /* Print the first FP/SIMD register name in a list. */
3850 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
3851 {
3852 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
3853 return;
3854 }
3855 asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S'));
3856 break;
3857
3858 case 'X':
3859 /* Print bottom 16 bits of integer constant in hex. */
3860 if (!CONST_INT_P (x))
3861 {
3862 output_operand_lossage ("invalid operand for '%%%c'", code);
3863 return;
3864 }
3865 asm_fprintf (f, "0x%wx", UINTVAL (x) & 0xffff);
3866 break;
3867
3868 case 'w':
3869 case 'x':
3870 /* Print a general register name or the zero register (32-bit or
3871 64-bit). */
3872 if (x == const0_rtx
3873 || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x)))
3874 {
3875 asm_fprintf (f, "%czr", code);
3876 break;
3877 }
3878
3879 if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
3880 {
3881 asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM);
3882 break;
3883 }
3884
3885 if (REG_P (x) && REGNO (x) == SP_REGNUM)
3886 {
3887 asm_fprintf (f, "%ssp", code == 'w' ? "w" : "");
3888 break;
3889 }
3890
3891 /* Fall through */
3892
3893 case 0:
3894 /* Print a normal operand, if it's a general register, then we
3895 assume DImode. */
3896 if (x == NULL)
3897 {
3898 output_operand_lossage ("missing operand");
3899 return;
3900 }
3901
3902 switch (GET_CODE (x))
3903 {
3904 case REG:
3905 asm_fprintf (f, "%s", reg_names [REGNO (x)]);
3906 break;
3907
3908 case MEM:
3909 aarch64_memory_reference_mode = GET_MODE (x);
3910 output_address (XEXP (x, 0));
3911 break;
3912
3913 case LABEL_REF:
3914 case SYMBOL_REF:
3915 output_addr_const (asm_out_file, x);
3916 break;
3917
3918 case CONST_INT:
3919 asm_fprintf (f, "%wd", INTVAL (x));
3920 break;
3921
3922 case CONST_VECTOR:
3923 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT)
3924 {
3925 gcc_assert (aarch64_const_vec_all_same_int_p (x,
3926 HOST_WIDE_INT_MIN,
3927 HOST_WIDE_INT_MAX));
3928 asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
3929 }
3930 else if (aarch64_simd_imm_zero_p (x, GET_MODE (x)))
3931 {
3932 fputc ('0', f);
3933 }
3934 else
3935 gcc_unreachable ();
3936 break;
3937
3938 case CONST_DOUBLE:
3939 /* CONST_DOUBLE can represent a double-width integer.
3940 In this case, the mode of x is VOIDmode. */
3941 if (GET_MODE (x) == VOIDmode)
3942 ; /* Do Nothing. */
3943 else if (aarch64_float_const_zero_rtx_p (x))
3944 {
3945 fputc ('0', f);
3946 break;
3947 }
3948 else if (aarch64_float_const_representable_p (x))
3949 {
3950 #define buf_size 20
3951 char float_buf[buf_size] = {'\0'};
3952 REAL_VALUE_TYPE r;
3953 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3954 real_to_decimal_for_mode (float_buf, &r,
3955 buf_size, buf_size,
3956 1, GET_MODE (x));
3957 asm_fprintf (asm_out_file, "%s", float_buf);
3958 break;
3959 #undef buf_size
3960 }
3961 output_operand_lossage ("invalid constant");
3962 return;
3963 default:
3964 output_operand_lossage ("invalid operand");
3965 return;
3966 }
3967 break;
3968
3969 case 'A':
3970 if (GET_CODE (x) == HIGH)
3971 x = XEXP (x, 0);
3972
3973 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
3974 {
3975 case SYMBOL_SMALL_GOT:
3976 asm_fprintf (asm_out_file, ":got:");
3977 break;
3978
3979 case SYMBOL_SMALL_TLSGD:
3980 asm_fprintf (asm_out_file, ":tlsgd:");
3981 break;
3982
3983 case SYMBOL_SMALL_TLSDESC:
3984 asm_fprintf (asm_out_file, ":tlsdesc:");
3985 break;
3986
3987 case SYMBOL_SMALL_GOTTPREL:
3988 asm_fprintf (asm_out_file, ":gottprel:");
3989 break;
3990
3991 case SYMBOL_SMALL_TPREL:
3992 asm_fprintf (asm_out_file, ":tprel:");
3993 break;
3994
3995 case SYMBOL_TINY_GOT:
3996 gcc_unreachable ();
3997 break;
3998
3999 default:
4000 break;
4001 }
4002 output_addr_const (asm_out_file, x);
4003 break;
4004
4005 case 'L':
4006 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
4007 {
4008 case SYMBOL_SMALL_GOT:
4009 asm_fprintf (asm_out_file, ":lo12:");
4010 break;
4011
4012 case SYMBOL_SMALL_TLSGD:
4013 asm_fprintf (asm_out_file, ":tlsgd_lo12:");
4014 break;
4015
4016 case SYMBOL_SMALL_TLSDESC:
4017 asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
4018 break;
4019
4020 case SYMBOL_SMALL_GOTTPREL:
4021 asm_fprintf (asm_out_file, ":gottprel_lo12:");
4022 break;
4023
4024 case SYMBOL_SMALL_TPREL:
4025 asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
4026 break;
4027
4028 case SYMBOL_TINY_GOT:
4029 asm_fprintf (asm_out_file, ":got:");
4030 break;
4031
4032 default:
4033 break;
4034 }
4035 output_addr_const (asm_out_file, x);
4036 break;
4037
4038 case 'G':
4039
4040 switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
4041 {
4042 case SYMBOL_SMALL_TPREL:
4043 asm_fprintf (asm_out_file, ":tprel_hi12:");
4044 break;
4045 default:
4046 break;
4047 }
4048 output_addr_const (asm_out_file, x);
4049 break;
4050
4051 default:
4052 output_operand_lossage ("invalid operand prefix '%%%c'", code);
4053 return;
4054 }
4055 }
4056
4057 void
4058 aarch64_print_operand_address (FILE *f, rtx x)
4059 {
4060 struct aarch64_address_info addr;
4061
4062 if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
4063 MEM, true))
4064 switch (addr.type)
4065 {
4066 case ADDRESS_REG_IMM:
4067 if (addr.offset == const0_rtx)
4068 asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]);
4069 else
4070 asm_fprintf (f, "[%s, %wd]", reg_names [REGNO (addr.base)],
4071 INTVAL (addr.offset));
4072 return;
4073
4074 case ADDRESS_REG_REG:
4075 if (addr.shift == 0)
4076 asm_fprintf (f, "[%s, %s]", reg_names [REGNO (addr.base)],
4077 reg_names [REGNO (addr.offset)]);
4078 else
4079 asm_fprintf (f, "[%s, %s, lsl %u]", reg_names [REGNO (addr.base)],
4080 reg_names [REGNO (addr.offset)], addr.shift);
4081 return;
4082
4083 case ADDRESS_REG_UXTW:
4084 if (addr.shift == 0)
4085 asm_fprintf (f, "[%s, w%d, uxtw]", reg_names [REGNO (addr.base)],
4086 REGNO (addr.offset) - R0_REGNUM);
4087 else
4088 asm_fprintf (f, "[%s, w%d, uxtw %u]", reg_names [REGNO (addr.base)],
4089 REGNO (addr.offset) - R0_REGNUM, addr.shift);
4090 return;
4091
4092 case ADDRESS_REG_SXTW:
4093 if (addr.shift == 0)
4094 asm_fprintf (f, "[%s, w%d, sxtw]", reg_names [REGNO (addr.base)],
4095 REGNO (addr.offset) - R0_REGNUM);
4096 else
4097 asm_fprintf (f, "[%s, w%d, sxtw %u]", reg_names [REGNO (addr.base)],
4098 REGNO (addr.offset) - R0_REGNUM, addr.shift);
4099 return;
4100
4101 case ADDRESS_REG_WB:
4102 switch (GET_CODE (x))
4103 {
4104 case PRE_INC:
4105 asm_fprintf (f, "[%s, %d]!", reg_names [REGNO (addr.base)],
4106 GET_MODE_SIZE (aarch64_memory_reference_mode));
4107 return;
4108 case POST_INC:
4109 asm_fprintf (f, "[%s], %d", reg_names [REGNO (addr.base)],
4110 GET_MODE_SIZE (aarch64_memory_reference_mode));
4111 return;
4112 case PRE_DEC:
4113 asm_fprintf (f, "[%s, -%d]!", reg_names [REGNO (addr.base)],
4114 GET_MODE_SIZE (aarch64_memory_reference_mode));
4115 return;
4116 case POST_DEC:
4117 asm_fprintf (f, "[%s], -%d", reg_names [REGNO (addr.base)],
4118 GET_MODE_SIZE (aarch64_memory_reference_mode));
4119 return;
4120 case PRE_MODIFY:
4121 asm_fprintf (f, "[%s, %wd]!", reg_names [REGNO (addr.base)],
4122 INTVAL (addr.offset));
4123 return;
4124 case POST_MODIFY:
4125 asm_fprintf (f, "[%s], %wd", reg_names [REGNO (addr.base)],
4126 INTVAL (addr.offset));
4127 return;
4128 default:
4129 break;
4130 }
4131 break;
4132
4133 case ADDRESS_LO_SUM:
4134 asm_fprintf (f, "[%s, #:lo12:", reg_names [REGNO (addr.base)]);
4135 output_addr_const (f, addr.offset);
4136 asm_fprintf (f, "]");
4137 return;
4138
4139 case ADDRESS_SYMBOLIC:
4140 break;
4141 }
4142
4143 output_addr_const (f, x);
4144 }
4145
4146 bool
4147 aarch64_label_mentioned_p (rtx x)
4148 {
4149 const char *fmt;
4150 int i;
4151
4152 if (GET_CODE (x) == LABEL_REF)
4153 return true;
4154
4155 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
4156 referencing instruction, but they are constant offsets, not
4157 symbols. */
4158 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
4159 return false;
4160
4161 fmt = GET_RTX_FORMAT (GET_CODE (x));
4162 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4163 {
4164 if (fmt[i] == 'E')
4165 {
4166 int j;
4167
4168 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4169 if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
4170 return true;
4171 }
4172 else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
4173 return true;
4174 }
4175
4176 return false;
4177 }
4178
4179 /* Implement REGNO_REG_CLASS. */
4180
4181 enum reg_class
4182 aarch64_regno_regclass (unsigned regno)
4183 {
4184 if (GP_REGNUM_P (regno))
4185 return GENERAL_REGS;
4186
4187 if (regno == SP_REGNUM)
4188 return STACK_REG;
4189
4190 if (regno == FRAME_POINTER_REGNUM
4191 || regno == ARG_POINTER_REGNUM)
4192 return POINTER_REGS;
4193
4194 if (FP_REGNUM_P (regno))
4195 return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
4196
4197 return NO_REGS;
4198 }
4199
4200 /* Try a machine-dependent way of reloading an illegitimate address
4201 operand. If we find one, push the reload and return the new rtx. */
4202
4203 rtx
4204 aarch64_legitimize_reload_address (rtx *x_p,
4205 enum machine_mode mode,
4206 int opnum, int type,
4207 int ind_levels ATTRIBUTE_UNUSED)
4208 {
4209 rtx x = *x_p;
4210
4211 /* Do not allow mem (plus (reg, const)) if vector struct mode. */
4212 if (aarch64_vect_struct_mode_p (mode)
4213 && GET_CODE (x) == PLUS
4214 && REG_P (XEXP (x, 0))
4215 && CONST_INT_P (XEXP (x, 1)))
4216 {
4217 rtx orig_rtx = x;
4218 x = copy_rtx (x);
4219 push_reload (orig_rtx, NULL_RTX, x_p, NULL,
4220 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4221 opnum, (enum reload_type) type);
4222 return x;
4223 }
4224
4225 /* We must recognize output that we have already generated ourselves. */
4226 if (GET_CODE (x) == PLUS
4227 && GET_CODE (XEXP (x, 0)) == PLUS
4228 && REG_P (XEXP (XEXP (x, 0), 0))
4229 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4230 && CONST_INT_P (XEXP (x, 1)))
4231 {
4232 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4233 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4234 opnum, (enum reload_type) type);
4235 return x;
4236 }
4237
4238 /* We wish to handle large displacements off a base register by splitting
4239 the addend across an add and the mem insn. This can cut the number of
4240 extra insns needed from 3 to 1. It is only useful for load/store of a
4241 single register with a 12-bit offset field. */
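/* For example, with an SImode access at base + 0x12344 the addend is split
   into high == 0x12000 (a valid ADD immediate, 0x12 << 12) and
   low == 0x344, which fits the scaled 12-bit offset field of a single
   LDR/STR. */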
4242 if (GET_CODE (x) == PLUS
4243 && REG_P (XEXP (x, 0))
4244 && CONST_INT_P (XEXP (x, 1))
4245 && HARD_REGISTER_P (XEXP (x, 0))
4246 && mode != TImode
4247 && mode != TFmode
4248 && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
4249 {
4250 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
4251 HOST_WIDE_INT low = val & 0xfff;
4252 HOST_WIDE_INT high = val - low;
4253 HOST_WIDE_INT offs;
4254 rtx cst;
4255 enum machine_mode xmode = GET_MODE (x);
4256
4257 /* In ILP32, xmode can be either DImode or SImode. */
4258 gcc_assert (xmode == DImode || xmode == SImode);
4259
4260 /* Leave non-zero BLKmode offsets to the generic reload code; we cannot
4261 ascertain BLKmode alignment, so the split below is not safe. */
4262 if (GET_MODE_SIZE (mode) == 0)
4263 return NULL_RTX;
4264
4265 offs = low % GET_MODE_SIZE (mode);
4266
4267 /* Align misaligned offset by adjusting high part to compensate. */
4268 if (offs != 0)
4269 {
4270 if (aarch64_uimm12_shift (high + offs))
4271 {
4272 /* Align down. */
4273 low = low - offs;
4274 high = high + offs;
4275 }
4276 else
4277 {
4278 /* Align up. */
4279 offs = GET_MODE_SIZE (mode) - offs;
4280 low = low + offs;
4281 high = high + (low & 0x1000) - offs;
4282 low &= 0xfff;
4283 }
4284 }
4285
4286 /* Check for overflow. */
4287 if (high + low != val)
4288 return NULL_RTX;
4289
4290 cst = GEN_INT (high);
4291 if (!aarch64_uimm12_shift (high))
4292 cst = force_const_mem (xmode, cst);
4293
4294 /* Reload high part into base reg, leaving the low part
4295 in the mem instruction.
4296 Note that replacing this gen_rtx_PLUS with plus_constant is
4297 wrong in this case because we rely on the
4298 (plus (plus reg c1) c2) structure being preserved so that
4299 XEXP (*p, 0) in push_reload below uses the correct term. */
4300 x = gen_rtx_PLUS (xmode,
4301 gen_rtx_PLUS (xmode, XEXP (x, 0), cst),
4302 GEN_INT (low));
4303
4304 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4305 BASE_REG_CLASS, xmode, VOIDmode, 0, 0,
4306 opnum, (enum reload_type) type);
4307 return x;
4308 }
4309
4310 return NULL_RTX;
4311 }
4312
4313
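/* Implement TARGET_SECONDARY_RELOAD. Return the register class needed as
   an intermediate (or signal a reload pattern through SRI) when a value of
   mode MODE cannot be moved directly into class RCLASS. */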
4314 static reg_class_t
4315 aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
4316 reg_class_t rclass,
4317 enum machine_mode mode,
4318 secondary_reload_info *sri)
4319 {
4320 /* Without the TARGET_SIMD instructions we cannot move a Q register
4321 to a Q register directly. We need a scratch. */
4322 if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
4323 && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
4324 && reg_class_subset_p (rclass, FP_REGS))
4325 {
4326 if (mode == TFmode)
4327 sri->icode = CODE_FOR_aarch64_reload_movtf;
4328 else if (mode == TImode)
4329 sri->icode = CODE_FOR_aarch64_reload_movti;
4330 return NO_REGS;
4331 }
4332
4333 /* A TFmode or TImode memory access should be handled via an FP_REG
4334 because AArch64 has richer addressing modes for LDR/STR instructions
4335 than for LDP/STP instructions. */
4336 if (!TARGET_GENERAL_REGS_ONLY && rclass == GENERAL_REGS
4337 && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
4338 return FP_REGS;
4339
4340 if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
4341 return GENERAL_REGS;
4342
4343 return NO_REGS;
4344 }
4345
4346 static bool
4347 aarch64_can_eliminate (const int from, const int to)
4348 {
4349 /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
4350 HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
4351
4352 if (frame_pointer_needed)
4353 {
4354 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4355 return true;
4356 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
4357 return false;
4358 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
4359 && !cfun->calls_alloca)
4360 return true;
4361 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4362 return true;
4363
4364 return false;
4365 }
4366
4367 return true;
4368 }
4369
4370 HOST_WIDE_INT
4371 aarch64_initial_elimination_offset (unsigned from, unsigned to)
4372 {
4373 aarch64_layout_frame ();
4374
4375 if (to == HARD_FRAME_POINTER_REGNUM)
4376 {
4377 if (from == ARG_POINTER_REGNUM)
4378 return cfun->machine->frame.frame_size - crtl->outgoing_args_size;
4379
4380 if (from == FRAME_POINTER_REGNUM)
4381 return (cfun->machine->frame.hard_fp_offset
4382 - cfun->machine->frame.saved_varargs_size);
4383 }
4384
4385 if (to == STACK_POINTER_REGNUM)
4386 {
4387 if (from == FRAME_POINTER_REGNUM)
4388 return (cfun->machine->frame.frame_size
4389 - cfun->machine->frame.saved_varargs_size);
4390 }
4391
4392 return cfun->machine->frame.frame_size;
4393 }
4394
4395 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
4396 previous frame. */
4397
4398 rtx
4399 aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4400 {
4401 if (count != 0)
4402 return const0_rtx;
4403 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
4404 }
4405
4406
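/* Output the assembler code for a trampoline: two literal loads that pull
   the target function address into IP1 and the static chain value into the
   static chain register, an indirect branch through IP1, and zero-filled
   slots for the two pointers, which aarch64_trampoline_init fills in at
   run time. */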
4407 static void
4408 aarch64_asm_trampoline_template (FILE *f)
4409 {
4410 if (TARGET_ILP32)
4411 {
4412 asm_fprintf (f, "\tldr\tw%d, .+16\n", IP1_REGNUM - R0_REGNUM);
4413 asm_fprintf (f, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM - R0_REGNUM);
4414 }
4415 else
4416 {
4417 asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]);
4418 asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]);
4419 }
4420 asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
4421 assemble_aligned_integer (4, const0_rtx);
4422 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
4423 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
4424 }
4425
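/* Initialize the trampoline at M_TRAMP: copy the code part of the template,
   store FNDECL's address and CHAIN_VALUE into the trailing pointer slots,
   and flush the instruction cache over the whole trampoline. */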
4426 static void
4427 aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
4428 {
4429 rtx fnaddr, mem, a_tramp;
4430 const int tramp_code_sz = 16;
4431
4432 /* We don't need to copy the trailing D-words; we fill those in below. */
4433 emit_block_move (m_tramp, assemble_trampoline_template (),
4434 GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
4435 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
4436 fnaddr = XEXP (DECL_RTL (fndecl), 0);
4437 if (GET_MODE (fnaddr) != ptr_mode)
4438 fnaddr = convert_memory_address (ptr_mode, fnaddr);
4439 emit_move_insn (mem, fnaddr);
4440
4441 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz + POINTER_BYTES);
4442 emit_move_insn (mem, chain_value);
4443
4444 /* XXX We should really define a "clear_cache" pattern and use
4445 gen_clear_cache(). */
4446 a_tramp = XEXP (m_tramp, 0);
4447 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
4448 LCT_NORMAL, VOIDmode, 2, a_tramp, ptr_mode,
4449 plus_constant (ptr_mode, a_tramp, TRAMPOLINE_SIZE),
4450 ptr_mode);
4451 }
4452
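/* Return the maximum number of registers of class REGCLASS needed to hold
   a value of mode MODE: one register per 16 bytes for vector modes (Q
   registers), otherwise one register per 8 bytes. */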
4453 static unsigned char
4454 aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
4455 {
4456 switch (regclass)
4457 {
4458 case CALLER_SAVE_REGS:
4459 case POINTER_REGS:
4460 case GENERAL_REGS:
4461 case ALL_REGS:
4462 case FP_REGS:
4463 case FP_LO_REGS:
4464 return
4465 aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 :
4466 (GET_MODE_SIZE (mode) + 7) / 8;
4467 case STACK_REG:
4468 return 1;
4469
4470 case NO_REGS:
4471 return 0;
4472
4473 default:
4474 break;
4475 }
4476 gcc_unreachable ();
4477 }
4478
4479 static reg_class_t
4480 aarch64_preferred_reload_class (rtx x, reg_class_t regclass)
4481 {
4482 if (regclass == POINTER_REGS)
4483 return GENERAL_REGS;
4484
4485 if (regclass == STACK_REG)
4486 {
4487 if (REG_P(x)
4488 && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x)), POINTER_REGS))
4489 return regclass;
4490
4491 return NO_REGS;
4492 }
4493
4494 /* If it's an integer immediate that MOVI can't handle, then
4495 FP_REGS is not an option, so we return NO_REGS instead. */
4496 if (CONST_INT_P (x) && reg_class_subset_p (regclass, FP_REGS)
4497 && !aarch64_simd_imm_scalar_p (x, GET_MODE (x)))
4498 return NO_REGS;
4499
4500 /* Register elimination can result in a request for
4501 SP+constant->FP_REGS. We cannot support such operations, which
4502 use SP as source and an FP_REG as destination, so reject them
4503 right away. */
4504 if (! reg_class_subset_p (regclass, GENERAL_REGS) && GET_CODE (x) == PLUS)
4505 {
4506 rtx lhs = XEXP (x, 0);
4507
4508 /* Look through a possible SUBREG introduced by ILP32. */
4509 if (GET_CODE (lhs) == SUBREG)
4510 lhs = SUBREG_REG (lhs);
4511
4512 gcc_assert (REG_P (lhs));
4513 gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs)),
4514 POINTER_REGS));
4515 return NO_REGS;
4516 }
4517
4518 return regclass;
4519 }
4520
4521 void
4522 aarch64_asm_output_labelref (FILE* f, const char *name)
4523 {
4524 asm_fprintf (f, "%U%s", name);
4525 }
4526
4527 static void
4528 aarch64_elf_asm_constructor (rtx symbol, int priority)
4529 {
4530 if (priority == DEFAULT_INIT_PRIORITY)
4531 default_ctor_section_asm_out_constructor (symbol, priority);
4532 else
4533 {
4534 section *s;
4535 char buf[18];
4536 snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
4537 s = get_section (buf, SECTION_WRITE, NULL);
4538 switch_to_section (s);
4539 assemble_align (POINTER_SIZE);
4540 assemble_aligned_integer (POINTER_BYTES, symbol);
4541 }
4542 }
4543
4544 static void
4545 aarch64_elf_asm_destructor (rtx symbol, int priority)
4546 {
4547 if (priority == DEFAULT_INIT_PRIORITY)
4548 default_dtor_section_asm_out_destructor (symbol, priority);
4549 else
4550 {
4551 section *s;
4552 char buf[18];
4553 snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
4554 s = get_section (buf, SECTION_WRITE, NULL);
4555 switch_to_section (s);
4556 assemble_align (POINTER_SIZE);
4557 assemble_aligned_integer (POINTER_BYTES, symbol);
4558 }
4559 }
4560
4561 const char*
4562 aarch64_output_casesi (rtx *operands)
4563 {
4564 char buf[100];
4565 char label[100];
4566 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
4567 int index;
4568 static const char *const patterns[4][2] =
4569 {
4570 {
4571 "ldrb\t%w3, [%0,%w1,uxtw]",
4572 "add\t%3, %4, %w3, sxtb #2"
4573 },
4574 {
4575 "ldrh\t%w3, [%0,%w1,uxtw #1]",
4576 "add\t%3, %4, %w3, sxth #2"
4577 },
4578 {
4579 "ldr\t%w3, [%0,%w1,uxtw #2]",
4580 "add\t%3, %4, %w3, sxtw #2"
4581 },
4582 /* We assume that DImode is only generated when not optimizing and
4583 that we don't really need 64-bit address offsets. That would
4584 imply an object file with 8GB of code in a single function! */
4585 {
4586 "ldr\t%w3, [%0,%w1,uxtw #2]",
4587 "add\t%3, %4, %w3, sxtw #2"
4588 }
4589 };
4590
4591 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
4592
4593 index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
4594
4595 gcc_assert (index >= 0 && index <= 3);
4596
4597 /* Need to implement table size reduction, by changing the code below. */
4598 output_asm_insn (patterns[index][0], operands);
4599 ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
4600 snprintf (buf, sizeof (buf),
4601 "adr\t%%4, %s", targetm.strip_name_encoding (label));
4602 output_asm_insn (buf, operands);
4603 output_asm_insn (patterns[index][1], operands);
4604 output_asm_insn ("br\t%3", operands);
4605 assemble_label (asm_out_file, label);
4606 return "";
4607 }
4608
4609
4610 /* Return size in bits of an arithmetic operand which is shifted/scaled and
4611 masked such that it is suitable for a UXTB, UXTH, or UXTW extend
4612 operator. */
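/* For example, SHIFT == 1 with MASK == 0x1fe (0xff << 1) yields 8,
   matching a UXTB scaled by 1, while a mask that is not an 8-, 16- or
   32-bit field starting at SHIFT yields 0. */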
4613
4614 int
4615 aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
4616 {
4617 if (shift >= 0 && shift <= 3)
4618 {
4619 int size;
4620 for (size = 8; size <= 32; size *= 2)
4621 {
4622 HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
4623 if (mask == bits << shift)
4624 return size;
4625 }
4626 }
4627 return 0;
4628 }
4629
4630 static bool
4631 aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
4632 const_rtx x ATTRIBUTE_UNUSED)
4633 {
4634 /* We can't use blocks for constants when we're using a per-function
4635 constant pool. */
4636 return false;
4637 }
4638
4639 static section *
4640 aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
4641 rtx x ATTRIBUTE_UNUSED,
4642 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
4643 {
4644 /* Force all constant pool entries into the current function section. */
4645 return function_section (current_function_decl);
4646 }
4647
4648
4649 /* Costs. */
4650
4651 /* Helper function for rtx cost calculation. Strip a shift expression
4652 from X. Returns the inner operand if successful, or the original
4653 expression on failure. */
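/* For example, both (ashift (reg) (const_int 3)) and the equivalent
   (mult (reg) (const_int 8)) strip to the inner register; a shift by a
   register amount is returned unchanged. */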
4654 static rtx
4655 aarch64_strip_shift (rtx x)
4656 {
4657 rtx op = x;
4658
4659 /* We accept both ROTATERT and ROTATE: since the RHS must be a constant
4660 we can convert both to ROR during final output. */
4661 if ((GET_CODE (op) == ASHIFT
4662 || GET_CODE (op) == ASHIFTRT
4663 || GET_CODE (op) == LSHIFTRT
4664 || GET_CODE (op) == ROTATERT
4665 || GET_CODE (op) == ROTATE)
4666 && CONST_INT_P (XEXP (op, 1)))
4667 return XEXP (op, 0);
4668
4669 if (GET_CODE (op) == MULT
4670 && CONST_INT_P (XEXP (op, 1))
4671 && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
4672 return XEXP (op, 0);
4673
4674 return x;
4675 }
4676
4677 /* Helper function for rtx cost calculation. Strip an extend
4678 expression from X. Returns the inner operand if successful, or the
4679 original expression on failure. We deal with a number of possible
4680 canonicalization variations here. */
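/* For example, (zero_extend:DI (reg:SI)) strips to the inner register, as
   does the equivalent (and (mult ...) (const_int ...)) form of a
   zero-extend and an extend wrapped in a left shift by 1..4. */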
4681 static rtx
4682 aarch64_strip_extend (rtx x)
4683 {
4684 rtx op = x;
4685
4686 /* Zero and sign extraction of a widened value. */
4687 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
4688 && XEXP (op, 2) == const0_rtx
4689 && GET_CODE (XEXP (op, 0)) == MULT
4690 && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
4691 XEXP (op, 1)))
4692 return XEXP (XEXP (op, 0), 0);
4693
4694 /* It can also be represented (for zero-extend) as an AND with an
4695 immediate. */
4696 if (GET_CODE (op) == AND
4697 && GET_CODE (XEXP (op, 0)) == MULT
4698 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
4699 && CONST_INT_P (XEXP (op, 1))
4700 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
4701 INTVAL (XEXP (op, 1))) != 0)
4702 return XEXP (XEXP (op, 0), 0);
4703
4704 /* Now handle extended register, as this may also have an optional
4705 left shift by 1..4. */
4706 if (GET_CODE (op) == ASHIFT
4707 && CONST_INT_P (XEXP (op, 1))
4708 && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
4709 op = XEXP (op, 0);
4710
4711 if (GET_CODE (op) == ZERO_EXTEND
4712 || GET_CODE (op) == SIGN_EXTEND)
4713 op = XEXP (op, 0);
4714
4715 if (op != x)
4716 return op;
4717
4718 return x;
4719 }
4720
4721 /* Helper function for rtx cost calculation. Calculate the cost of
4722 a MULT, which may be part of a multiply-accumulate rtx. Return
4723 the calculated cost of the expression, recursing manually in to
4724 operands where needed. */
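/* For example, (mult (reg) (const_int 8)) is costed as an LSL #3, while
   (mult (reg) (reg)) reached with OUTER == PLUS is costed as a MADD. */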
4725
4726 static int
4727 aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed)
4728 {
4729 rtx op0, op1;
4730 const struct cpu_cost_table *extra_cost
4731 = aarch64_tune_params->insn_extra_cost;
4732 int cost = 0;
4733 bool maybe_fma = (outer == PLUS || outer == MINUS);
4734 enum machine_mode mode = GET_MODE (x);
4735
4736 gcc_checking_assert (code == MULT);
4737
4738 op0 = XEXP (x, 0);
4739 op1 = XEXP (x, 1);
4740
4741 if (VECTOR_MODE_P (mode))
4742 mode = GET_MODE_INNER (mode);
4743
4744 /* Integer multiply/fma. */
4745 if (GET_MODE_CLASS (mode) == MODE_INT)
4746 {
4747 /* The multiply will be canonicalized as a shift, cost it as such. */
4748 if (CONST_INT_P (op1)
4749 && exact_log2 (INTVAL (op1)) > 0)
4750 {
4751 if (speed)
4752 {
4753 if (maybe_fma)
4754 /* ADD (shifted register). */
4755 cost += extra_cost->alu.arith_shift;
4756 else
4757 /* LSL (immediate). */
4758 cost += extra_cost->alu.shift;
4759 }
4760
4761 cost += rtx_cost (op0, GET_CODE (op0), 0, speed);
4762
4763 return cost;
4764 }
4765
4766 /* Integer multiplies or FMAs have zero/sign extending variants. */
4767 if ((GET_CODE (op0) == ZERO_EXTEND
4768 && GET_CODE (op1) == ZERO_EXTEND)
4769 || (GET_CODE (op0) == SIGN_EXTEND
4770 && GET_CODE (op1) == SIGN_EXTEND))
4771 {
4772 cost += rtx_cost (XEXP (op0, 0), MULT, 0, speed)
4773 + rtx_cost (XEXP (op1, 0), MULT, 1, speed);
4774
4775 if (speed)
4776 {
4777 if (maybe_fma)
4778 /* MADD/SMADDL/UMADDL. */
4779 cost += extra_cost->mult[0].extend_add;
4780 else
4781 /* MUL/SMULL/UMULL. */
4782 cost += extra_cost->mult[0].extend;
4783 }
4784
4785 return cost;
4786 }
4787
4788 /* This is either an integer multiply or an FMA. In both cases
4789 we want to recurse and cost the operands. */
4790 cost += rtx_cost (op0, MULT, 0, speed)
4791 + rtx_cost (op1, MULT, 1, speed);
4792
4793 if (speed)
4794 {
4795 if (maybe_fma)
4796 /* MADD. */
4797 cost += extra_cost->mult[mode == DImode].add;
4798 else
4799 /* MUL. */
4800 cost += extra_cost->mult[mode == DImode].simple;
4801 }
4802
4803 return cost;
4804 }
4805 else
4806 {
4807 if (speed)
4808 {
4809 /* Floating-point FMA/FMUL can also support negations of the
4810 operands. */
4811 if (GET_CODE (op0) == NEG)
4812 op0 = XEXP (op0, 0);
4813 if (GET_CODE (op1) == NEG)
4814 op1 = XEXP (op1, 0);
4815
4816 if (maybe_fma)
4817 /* FMADD/FNMADD/FNMSUB/FMSUB. */
4818 cost += extra_cost->fp[mode == DFmode].fma;
4819 else
4820 /* FMUL/FNMUL. */
4821 cost += extra_cost->fp[mode == DFmode].mult;
4822 }
4823
4824 cost += rtx_cost (op0, MULT, 0, speed)
4825 + rtx_cost (op1, MULT, 1, speed);
4826 return cost;
4827 }
4828 }
4829
4830 static int
4831 aarch64_address_cost (rtx x,
4832 enum machine_mode mode,
4833 addr_space_t as ATTRIBUTE_UNUSED,
4834 bool speed)
4835 {
4836 enum rtx_code c = GET_CODE (x);
4837 const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
4838 struct aarch64_address_info info;
4839 int cost = 0;
4840 info.shift = 0;
4841
4842 if (!aarch64_classify_address (&info, x, mode, c, false))
4843 {
4844 if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
4845 {
4846 /* This is a CONST or SYMBOL ref which will be split
4847 in a different way depending on the code model in use.
4848 Cost it through the generic infrastructure. */
4849 int cost_symbol_ref = rtx_cost (x, MEM, 1, speed);
4850 /* Divide through by the cost of one instruction to
4851 bring it to the same units as the address costs. */
4852 cost_symbol_ref /= COSTS_N_INSNS (1);
4853 /* The cost is then the cost of preparing the address,
4854 followed by an immediate (possibly 0) offset. */
4855 return cost_symbol_ref + addr_cost->imm_offset;
4856 }
4857 else
4858 {
4859 /* This is most likely a jump table from a case
4860 statement. */
4861 return addr_cost->register_offset;
4862 }
4863 }
4864
4865 switch (info.type)
4866 {
4867 case ADDRESS_LO_SUM:
4868 case ADDRESS_SYMBOLIC:
4869 case ADDRESS_REG_IMM:
4870 cost += addr_cost->imm_offset;
4871 break;
4872
4873 case ADDRESS_REG_WB:
4874 if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
4875 cost += addr_cost->pre_modify;
4876 else if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
4877 cost += addr_cost->post_modify;
4878 else
4879 gcc_unreachable ();
4880
4881 break;
4882
4883 case ADDRESS_REG_REG:
4884 cost += addr_cost->register_offset;
4885 break;
4886
4887 case ADDRESS_REG_UXTW:
4888 case ADDRESS_REG_SXTW:
4889 cost += addr_cost->register_extend;
4890 break;
4891
4892 default:
4893 gcc_unreachable ();
4894 }
4895
4896
4897 if (info.shift > 0)
4898 {
4899 /* For the sake of calculating the cost of the shifted register
4900 component, we can treat same sized modes in the same way. */
4901 switch (GET_MODE_BITSIZE (mode))
4902 {
4903 case 16:
4904 cost += addr_cost->addr_scale_costs.hi;
4905 break;
4906
4907 case 32:
4908 cost += addr_cost->addr_scale_costs.si;
4909 break;
4910
4911 case 64:
4912 cost += addr_cost->addr_scale_costs.di;
4913 break;
4914
4915 /* We can't tell, or this is a 128-bit vector. */
4916 default:
4917 cost += addr_cost->addr_scale_costs.ti;
4918 break;
4919 }
4920 }
4921
4922 return cost;
4923 }
4924
4925 /* Return true if the RTX X in mode MODE is a zero or sign extract
4926 usable in an ADD or SUB (extended register) instruction. */
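/* Schematically, (sign_extract:DI (mult:DI (reg) (const_int 4))
   (const_int 34) (const_int 0)) is such an extract; it corresponds to an
   extended-register operand of the form "Wn, sxtw #2". */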
4927 static bool
4928 aarch64_rtx_arith_op_extract_p (rtx x, enum machine_mode mode)
4929 {
4930 /* Catch add with a sign extract.
4931 This is add_<optab><mode>_multp2. */
4932 if (GET_CODE (x) == SIGN_EXTRACT
4933 || GET_CODE (x) == ZERO_EXTRACT)
4934 {
4935 rtx op0 = XEXP (x, 0);
4936 rtx op1 = XEXP (x, 1);
4937 rtx op2 = XEXP (x, 2);
4938
4939 if (GET_CODE (op0) == MULT
4940 && CONST_INT_P (op1)
4941 && op2 == const0_rtx
4942 && CONST_INT_P (XEXP (op0, 1))
4943 && aarch64_is_extend_from_extract (mode,
4944 XEXP (op0, 1),
4945 op1))
4946 {
4947 return true;
4948 }
4949 }
4950
4951 return false;
4952 }
4953
4954 static bool
4955 aarch64_frint_unspec_p (unsigned int u)
4956 {
4957 switch (u)
4958 {
4959 case UNSPEC_FRINTZ:
4960 case UNSPEC_FRINTP:
4961 case UNSPEC_FRINTM:
4962 case UNSPEC_FRINTA:
4963 case UNSPEC_FRINTN:
4964 case UNSPEC_FRINTX:
4965 case UNSPEC_FRINTI:
4966 return true;
4967
4968 default:
4969 return false;
4970 }
4971 }
4972
4973 /* Calculate the cost of calculating (if_then_else (OP0) (OP1) (OP2)),
4974 storing it in *COST. Result is true if the total cost of the operation
4975 has now been calculated. */
4976 static bool
4977 aarch64_if_then_else_costs (rtx op0, rtx op1, rtx op2, int *cost, bool speed)
4978 {
4979 rtx inner;
4980 rtx comparator;
4981 enum rtx_code cmpcode;
4982
4983 if (COMPARISON_P (op0))
4984 {
4985 inner = XEXP (op0, 0);
4986 comparator = XEXP (op0, 1);
4987 cmpcode = GET_CODE (op0);
4988 }
4989 else
4990 {
4991 inner = op0;
4992 comparator = const0_rtx;
4993 cmpcode = NE;
4994 }
4995
4996 if (GET_CODE (op1) == PC || GET_CODE (op2) == PC)
4997 {
4998 /* Conditional branch. */
4999 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_CC)
5000 return true;
5001 else
5002 {
5003 if (cmpcode == NE || cmpcode == EQ)
5004 {
5005 if (comparator == const0_rtx)
5006 {
5007 /* TBZ/TBNZ/CBZ/CBNZ. */
5008 if (GET_CODE (inner) == ZERO_EXTRACT)
5009 /* TBZ/TBNZ. */
5010 *cost += rtx_cost (XEXP (inner, 0), ZERO_EXTRACT,
5011 0, speed);
5012 else
5013 /* CBZ/CBNZ. */
5014 *cost += rtx_cost (inner, cmpcode, 0, speed);
5015
5016 return true;
5017 }
5018 }
5019 else if (cmpcode == LT || cmpcode == GE)
5020 {
5021 /* TBZ/TBNZ. */
5022 if (comparator == const0_rtx)
5023 return true;
5024 }
5025 }
5026 }
5027 else if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_CC)
5028 {
5029 /* It's a conditional operation based on the status flags,
5030 so it must be some flavor of CSEL. */
5031
5032 /* CSNEG, CSINV, and CSINC are handled for free as part of CSEL. */
5033 if (GET_CODE (op1) == NEG
5034 || GET_CODE (op1) == NOT
5035 || (GET_CODE (op1) == PLUS && XEXP (op1, 1) == const1_rtx))
5036 op1 = XEXP (op1, 0);
5037
5038 *cost += rtx_cost (op1, IF_THEN_ELSE, 1, speed);
5039 *cost += rtx_cost (op2, IF_THEN_ELSE, 2, speed);
5040 return true;
5041 }
5042
5043 /* We don't know what this is, cost all operands. */
5044 return false;
5045 }
5046
5047 /* Calculate the cost of calculating X, storing it in *COST. Result
5048 is true if the total cost of the operation has now been calculated. */
5049 static bool
5050 aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
5051 int param ATTRIBUTE_UNUSED, int *cost, bool speed)
5052 {
5053 rtx op0, op1, op2;
5054 const struct cpu_cost_table *extra_cost
5055 = aarch64_tune_params->insn_extra_cost;
5056 enum machine_mode mode = GET_MODE (x);
5057
5058 /* By default, assume that everything has equivalent cost to the
5059 cheapest instruction. Any additional costs are applied as a delta
5060 above this default. */
5061 *cost = COSTS_N_INSNS (1);
5062
5063 /* TODO: The cost infrastructure currently does not handle
5064 vector operations. Assume that all vector operations
5065 are equally expensive. */
5066 if (VECTOR_MODE_P (mode))
5067 {
5068 if (speed)
5069 *cost += extra_cost->vect.alu;
5070 return true;
5071 }
5072
5073 switch (code)
5074 {
5075 case SET:
5076 /* The cost depends entirely on the operands to SET. */
5077 *cost = 0;
5078 op0 = SET_DEST (x);
5079 op1 = SET_SRC (x);
5080
5081 switch (GET_CODE (op0))
5082 {
5083 case MEM:
5084 if (speed)
5085 {
5086 rtx address = XEXP (op0, 0);
5087 if (GET_MODE_CLASS (mode) == MODE_INT)
5088 *cost += extra_cost->ldst.store;
5089 else if (mode == SFmode)
5090 *cost += extra_cost->ldst.storef;
5091 else if (mode == DFmode)
5092 *cost += extra_cost->ldst.stored;
5093
5094 *cost +=
5095 COSTS_N_INSNS (aarch64_address_cost (address, mode,
5096 0, speed));
5097 }
5098
5099 *cost += rtx_cost (op1, SET, 1, speed);
5100 return true;
5101
5102 case SUBREG:
5103 if (! REG_P (SUBREG_REG (op0)))
5104 *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
5105
5106 /* Fall through. */
5107 case REG:
5108 /* const0_rtx is in general free, but we will use an
5109 instruction to set a register to 0. */
5110 if (REG_P (op1) || op1 == const0_rtx)
5111 {
5112 /* The cost is 1 per register copied. */
5113 int n_minus_1 = (GET_MODE_SIZE (GET_MODE (op0)) - 1)
5114 / UNITS_PER_WORD;
5115 *cost = COSTS_N_INSNS (n_minus_1 + 1);
5116 }
5117 else
5118 /* Cost is just the cost of the RHS of the set. */
5119 *cost += rtx_cost (op1, SET, 1, speed);
5120 return true;
5121
5122 case ZERO_EXTRACT:
5123 case SIGN_EXTRACT:
5124 /* Bit-field insertion. Strip any redundant widening of
5125 the RHS to meet the width of the target. */
5126 if (GET_CODE (op1) == SUBREG)
5127 op1 = SUBREG_REG (op1);
5128 if ((GET_CODE (op1) == ZERO_EXTEND
5129 || GET_CODE (op1) == SIGN_EXTEND)
5130 && CONST_INT_P (XEXP (op0, 1))
5131 && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
5132 >= INTVAL (XEXP (op0, 1))))
5133 op1 = XEXP (op1, 0);
5134
5135 if (CONST_INT_P (op1))
5136 {
5137 /* MOV immediate is assumed to always be cheap. */
5138 *cost = COSTS_N_INSNS (1);
5139 }
5140 else
5141 {
5142 /* BFM. */
5143 if (speed)
5144 *cost += extra_cost->alu.bfi;
5145 *cost += rtx_cost (op1, (enum rtx_code) code, 1, speed);
5146 }
5147
5148 return true;
5149
5150 default:
5151 /* We can't make sense of this, assume default cost. */
5152 *cost = COSTS_N_INSNS (1);
5153 return false;
5154 }
5155 return false;
5156
5157 case CONST_INT:
5158 /* If an instruction can incorporate a constant within the
5159 instruction, the instruction's expression avoids calling
5160 rtx_cost() on the constant. If rtx_cost() is called on a
5161 constant, then it is usually because the constant must be
5162 moved into a register by one or more instructions.
5163
5164 The exception is constant 0, which can be expressed
5165 as XZR/WZR and is therefore free. The exception to this is
5166 if we have (set (reg) (const0_rtx)) in which case we must cost
5167 the move. However, we can catch that when we cost the SET, so
5168 we don't need to consider that here. */
5169 if (x == const0_rtx)
5170 *cost = 0;
5171 else
5172 {
5173 /* To an approximation, the cost of building any other constant is
5174 proportional to the number of instructions required to build
5175 that constant. This is true whether we are compiling for
5176 SPEED or otherwise. */
5177 *cost = COSTS_N_INSNS (aarch64_build_constant (0,
5178 INTVAL (x),
5179 false));
5180 }
5181 return true;
5182
5183 case CONST_DOUBLE:
5184 if (speed)
5185 {
5186 /* mov[df,sf]_aarch64. */
5187 if (aarch64_float_const_representable_p (x))
5188 /* FMOV (scalar immediate). */
5189 *cost += extra_cost->fp[mode == DFmode].fpconst;
5190 else if (!aarch64_float_const_zero_rtx_p (x))
5191 {
5192 /* This will be a load from memory. */
5193 if (mode == DFmode)
5194 *cost += extra_cost->ldst.loadd;
5195 else
5196 *cost += extra_cost->ldst.loadf;
5197 }
5198 else
5199 /* Otherwise this is +0.0. We get this using MOVI d0, #0
5200 or MOV v0.s[0], wzr - neither of which is modeled by the
5201 cost tables. Just use the default cost. */
5202 {
5203 }
5204 }
5205
5206 return true;
5207
5208 case MEM:
5209 if (speed)
5210 {
5211 /* For loads we want the base cost of a load, plus an
5212 approximation for the additional cost of the addressing
5213 mode. */
5214 rtx address = XEXP (x, 0);
5215 if (GET_MODE_CLASS (mode) == MODE_INT)
5216 *cost += extra_cost->ldst.load;
5217 else if (mode == SFmode)
5218 *cost += extra_cost->ldst.loadf;
5219 else if (mode == DFmode)
5220 *cost += extra_cost->ldst.loadd;
5221
5222 *cost +=
5223 COSTS_N_INSNS (aarch64_address_cost (address, mode,
5224 0, speed));
5225 }
5226
5227 return true;
5228
5229 case NEG:
5230 op0 = XEXP (x, 0);
5231
5232 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
5233 {
5234 if (GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMPARE
5235 || GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMM_COMPARE)
5236 {
5237 /* CSETM. */
5238 *cost += rtx_cost (XEXP (op0, 0), NEG, 0, speed);
5239 return true;
5240 }
5241
5242 /* Cost this as SUB wzr, X. */
5243 op0 = CONST0_RTX (GET_MODE (x));
5244 op1 = XEXP (x, 0);
5245 goto cost_minus;
5246 }
5247
5248 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5249 {
5250 /* Support (neg(fma...)) as a single instruction only if
5251 sign of zeros is unimportant. This matches the decision
5252 making in aarch64.md. */
5253 if (GET_CODE (op0) == FMA && !HONOR_SIGNED_ZEROS (GET_MODE (op0)))
5254 {
5255 /* FNMADD. */
5256 *cost = rtx_cost (op0, NEG, 0, speed);
5257 return true;
5258 }
5259 if (speed)
5260 /* FNEG. */
5261 *cost += extra_cost->fp[mode == DFmode].neg;
5262 return false;
5263 }
5264
5265 return false;
5266
5267 case CLRSB:
5268 case CLZ:
5269 if (speed)
5270 *cost += extra_cost->alu.clz;
5271
5272 return false;
5273
5274 case COMPARE:
5275 op0 = XEXP (x, 0);
5276 op1 = XEXP (x, 1);
5277
5278 if (op1 == const0_rtx
5279 && GET_CODE (op0) == AND)
5280 {
5281 x = op0;
5282 goto cost_logic;
5283 }
5284
5285 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
5286 {
5287 /* TODO: A write to the CC flags possibly costs extra; this
5288 needs encoding in the cost tables. */
5289
5290 /* CC_ZESWPmode supports zero extend for free. */
5291 if (GET_MODE (x) == CC_ZESWPmode && GET_CODE (op0) == ZERO_EXTEND)
5292 op0 = XEXP (op0, 0);
5293
5294 /* ANDS. */
5295 if (GET_CODE (op0) == AND)
5296 {
5297 x = op0;
5298 goto cost_logic;
5299 }
5300
5301 if (GET_CODE (op0) == PLUS)
5302 {
5303 /* ADDS (and CMN alias). */
5304 x = op0;
5305 goto cost_plus;
5306 }
5307
5308 if (GET_CODE (op0) == MINUS)
5309 {
5310 /* SUBS. */
5311 x = op0;
5312 goto cost_minus;
5313 }
5314
5315 if (GET_CODE (op1) == NEG)
5316 {
5317 /* CMN. */
5318 if (speed)
5319 *cost += extra_cost->alu.arith;
5320
5321 *cost += rtx_cost (op0, COMPARE, 0, speed);
5322 *cost += rtx_cost (XEXP (op1, 0), NEG, 1, speed);
5323 return true;
5324 }
5325
5326 /* CMP.
5327
5328 Compare can freely swap the order of operands, and
5329 canonicalization puts the more complex operation first.
5330 But the integer MINUS logic expects the shift/extend
5331 operation in op1. */
5332 if (! (REG_P (op0)
5333 || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
5334 {
5335 op0 = XEXP (x, 1);
5336 op1 = XEXP (x, 0);
5337 }
5338 goto cost_minus;
5339 }
5340
5341 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
5342 {
5343 /* FCMP. */
5344 if (speed)
5345 *cost += extra_cost->fp[mode == DFmode].compare;
5346
5347 if (CONST_DOUBLE_P (op1) && aarch64_float_const_zero_rtx_p (op1))
5348 {
5349 /* FCMP supports constant 0.0 for no extra cost. */
5350 return true;
5351 }
5352 return false;
5353 }
5354
5355 return false;
5356
5357 case MINUS:
5358 {
5359 op0 = XEXP (x, 0);
5360 op1 = XEXP (x, 1);
5361
5362 cost_minus:
5363 /* Detect valid immediates. */
5364 if ((GET_MODE_CLASS (mode) == MODE_INT
5365 || (GET_MODE_CLASS (mode) == MODE_CC
5366 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
5367 && CONST_INT_P (op1)
5368 && aarch64_uimm12_shift (INTVAL (op1)))
5369 {
5370 *cost += rtx_cost (op0, MINUS, 0, speed);
5371
5372 if (speed)
5373 /* SUB(S) (immediate). */
5374 *cost += extra_cost->alu.arith;
5375 return true;
5376
5377 }
5378
5379 /* Look for SUB (extended register). */
5380 if (aarch64_rtx_arith_op_extract_p (op1, mode))
5381 {
5382 if (speed)
5383 *cost += extra_cost->alu.arith_shift;
5384
5385 *cost += rtx_cost (XEXP (XEXP (op1, 0), 0),
5386 (enum rtx_code) GET_CODE (op1),
5387 0, speed);
5388 return true;
5389 }
5390
5391 rtx new_op1 = aarch64_strip_extend (op1);
5392
5393 /* Cost this as an FMA-alike operation. */
5394 if ((GET_CODE (new_op1) == MULT
5395 || GET_CODE (new_op1) == ASHIFT)
5396 && code != COMPARE)
5397 {
5398 *cost += aarch64_rtx_mult_cost (new_op1, MULT,
5399 (enum rtx_code) code,
5400 speed);
5401 *cost += rtx_cost (op0, MINUS, 0, speed);
5402 return true;
5403 }
5404
5405 *cost += rtx_cost (new_op1, MINUS, 1, speed);
5406
5407 if (speed)
5408 {
5409 if (GET_MODE_CLASS (mode) == MODE_INT)
5410 /* SUB(S). */
5411 *cost += extra_cost->alu.arith;
5412 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5413 /* FSUB. */
5414 *cost += extra_cost->fp[mode == DFmode].addsub;
5415 }
5416 return true;
5417 }
5418
5419 case PLUS:
5420 {
5421 rtx new_op0;
5422
5423 op0 = XEXP (x, 0);
5424 op1 = XEXP (x, 1);
5425
5426 cost_plus:
5427 if (GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMPARE
5428 || GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMM_COMPARE)
5429 {
5430 /* CSINC. */
5431 *cost += rtx_cost (XEXP (op0, 0), PLUS, 0, speed);
5432 *cost += rtx_cost (op1, PLUS, 1, speed);
5433 return true;
5434 }
5435
5436 if (GET_MODE_CLASS (mode) == MODE_INT
5437 && CONST_INT_P (op1)
5438 && aarch64_uimm12_shift (INTVAL (op1)))
5439 {
5440 *cost += rtx_cost (op0, PLUS, 0, speed);
5441
5442 if (speed)
5443 /* ADD (immediate). */
5444 *cost += extra_cost->alu.arith;
5445 return true;
5446 }
5447
5448 /* Look for ADD (extended register). */
5449 if (aarch64_rtx_arith_op_extract_p (op0, mode))
5450 {
5451 if (speed)
5452 *cost += extra_cost->alu.arith_shift;
5453
5454 *cost += rtx_cost (XEXP (XEXP (op0, 0), 0),
5455 (enum rtx_code) GET_CODE (op0),
5456 0, speed);
5457 return true;
5458 }
5459
5460 /* Strip any extend, leave shifts behind as we will
5461 cost them through mult_cost. */
5462 new_op0 = aarch64_strip_extend (op0);
5463
5464 if (GET_CODE (new_op0) == MULT
5465 || GET_CODE (new_op0) == ASHIFT)
5466 {
5467 *cost += aarch64_rtx_mult_cost (new_op0, MULT, PLUS,
5468 speed);
5469 *cost += rtx_cost (op1, PLUS, 1, speed);
5470 return true;
5471 }
5472
5473 *cost += (rtx_cost (new_op0, PLUS, 0, speed)
5474 + rtx_cost (op1, PLUS, 1, speed));
5475
5476 if (speed)
5477 {
5478 if (GET_MODE_CLASS (mode) == MODE_INT)
5479 /* ADD. */
5480 *cost += extra_cost->alu.arith;
5481 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5482 /* FADD. */
5483 *cost += extra_cost->fp[mode == DFmode].addsub;
5484 }
5485 return true;
5486 }
5487
5488 case BSWAP:
5489 *cost = COSTS_N_INSNS (1);
5490
5491 if (speed)
5492 *cost += extra_cost->alu.rev;
5493
5494 return false;
5495
5496 case IOR:
5497 if (aarch_rev16_p (x))
5498 {
5499 *cost = COSTS_N_INSNS (1);
5500
5501 if (speed)
5502 *cost += extra_cost->alu.rev;
5503
5504 return true;
5505 }
5506 /* Fall through. */
5507 case XOR:
5508 case AND:
5509 cost_logic:
5510 op0 = XEXP (x, 0);
5511 op1 = XEXP (x, 1);
5512
5513 if (code == AND
5514 && GET_CODE (op0) == MULT
5515 && CONST_INT_P (XEXP (op0, 1))
5516 && CONST_INT_P (op1)
5517 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (op0, 1))),
5518 INTVAL (op1)) != 0)
5519 {
5520 /* This is a UBFM/SBFM. */
5521 *cost += rtx_cost (XEXP (op0, 0), ZERO_EXTRACT, 0, speed);
5522 if (speed)
5523 *cost += extra_cost->alu.bfx;
5524 return true;
5525 }
5526
5527 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
5528 {
5529 /* We possibly get the immediate for free; this is not
5530 modelled. */
5531 if (CONST_INT_P (op1)
5532 && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
5533 {
5534 *cost += rtx_cost (op0, (enum rtx_code) code, 0, speed);
5535
5536 if (speed)
5537 *cost += extra_cost->alu.logical;
5538
5539 return true;
5540 }
5541 else
5542 {
5543 rtx new_op0 = op0;
5544
5545 /* Handle ORN, EON, or BIC. */
5546 if (GET_CODE (op0) == NOT)
5547 op0 = XEXP (op0, 0);
5548
5549 new_op0 = aarch64_strip_shift (op0);
5550
5551 /* If we had a shift on op0 then this is a logical-shift-
5552 by-register/immediate operation. Otherwise, this is just
5553 a logical operation. */
5554 if (speed)
5555 {
5556 if (new_op0 != op0)
5557 {
5558 /* Shift by immediate. */
5559 if (CONST_INT_P (XEXP (op0, 1)))
5560 *cost += extra_cost->alu.log_shift;
5561 else
5562 *cost += extra_cost->alu.log_shift_reg;
5563 }
5564 else
5565 *cost += extra_cost->alu.logical;
5566 }
5567
5568 /* In both cases we want to cost both operands. */
5569 *cost += rtx_cost (new_op0, (enum rtx_code) code, 0, speed)
5570 + rtx_cost (op1, (enum rtx_code) code, 1, speed);
5571
5572 return true;
5573 }
5574 }
5575 return false;
5576
5577 case NOT:
5578 /* MVN. */
5579 if (speed)
5580 *cost += extra_cost->alu.logical;
5581
5582 /* The logical instruction could have the shifted register form,
5583 but the cost is the same if the shift is processed as a separate
5584 instruction, so we don't bother with it here. */
5585 return false;
5586
5587 case ZERO_EXTEND:
5588
5589 op0 = XEXP (x, 0);
5590 /* If a value is written in SI mode and then zero-extended to DI
5591 mode, the operation will in general be free, as a write to
5592 a 'w' register implicitly zeroes the upper bits of the
5593 corresponding 'x' register. However, if this is
5594
5595 (set (reg) (zero_extend (reg)))
5596
5597 we must cost the explicit register move. */
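/* For example, (set (reg:DI x0) (zero_extend:DI (reg:SI w1))) still needs
   an explicit "mov w0, w1", whereas zero-extending a value that is already
   being written to a 'w' register costs nothing extra. */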
5598 if (mode == DImode
5599 && GET_MODE (op0) == SImode
5600 && outer == SET)
5601 {
5602 int op_cost = rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
5603
5604 if (!op_cost && speed)
5605 /* MOV. */
5606 *cost += extra_cost->alu.extend;
5607 else
5608 /* Free, the cost is that of the SI mode operation. */
5609 *cost = op_cost;
5610
5611 return true;
5612 }
5613 else if (MEM_P (XEXP (x, 0)))
5614 {
5615 /* All loads can zero extend to any size for free. */
5616 *cost = rtx_cost (XEXP (x, 0), ZERO_EXTEND, param, speed);
5617 return true;
5618 }
5619
5620 /* UXTB/UXTH. */
5621 if (speed)
5622 *cost += extra_cost->alu.extend;
5623
5624 return false;
5625
5626 case SIGN_EXTEND:
5627 if (MEM_P (XEXP (x, 0)))
5628 {
5629 /* LDRSH. */
5630 if (speed)
5631 {
5632 rtx address = XEXP (XEXP (x, 0), 0);
5633 *cost += extra_cost->ldst.load_sign_extend;
5634
5635 *cost +=
5636 COSTS_N_INSNS (aarch64_address_cost (address, mode,
5637 0, speed));
5638 }
5639 return true;
5640 }
5641
5642 if (speed)
5643 *cost += extra_cost->alu.extend;
5644 return false;
5645
5646 case ASHIFT:
5647 op0 = XEXP (x, 0);
5648 op1 = XEXP (x, 1);
5649
5650 if (CONST_INT_P (op1))
5651 {
5652 /* LSL (immediate), UBFM, UBFIZ and friends. These are all
5653 aliases. */
5654 if (speed)
5655 *cost += extra_cost->alu.shift;
5656
5657 /* We can incorporate zero/sign extend for free. */
5658 if (GET_CODE (op0) == ZERO_EXTEND
5659 || GET_CODE (op0) == SIGN_EXTEND)
5660 op0 = XEXP (op0, 0);
5661
5662 *cost += rtx_cost (op0, ASHIFT, 0, speed);
5663 return true;
5664 }
5665 else
5666 {
5667 /* LSLV. */
5668 if (speed)
5669 *cost += extra_cost->alu.shift_reg;
5670
5671 return false; /* All arguments need to be in registers. */
5672 }
5673
5674 case ROTATE:
5675 case ROTATERT:
5676 case LSHIFTRT:
5677 case ASHIFTRT:
5678 op0 = XEXP (x, 0);
5679 op1 = XEXP (x, 1);
5680
5681 if (CONST_INT_P (op1))
5682 {
5683 /* ASR (immediate) and friends. */
5684 if (speed)
5685 *cost += extra_cost->alu.shift;
5686
5687 *cost += rtx_cost (op0, (enum rtx_code) code, 0, speed);
5688 return true;
5689 }
5690 else
5691 {
5692
5693 /* ASR (register) and friends. */
5694 if (speed)
5695 *cost += extra_cost->alu.shift_reg;
5696
5697 return false; /* All arguments need to be in registers. */
5698 }
5699
5700 case SYMBOL_REF:
5701
5702 if (aarch64_cmodel == AARCH64_CMODEL_LARGE)
5703 {
5704 /* LDR. */
5705 if (speed)
5706 *cost += extra_cost->ldst.load;
5707 }
5708 else if (aarch64_cmodel == AARCH64_CMODEL_SMALL
5709 || aarch64_cmodel == AARCH64_CMODEL_SMALL_PIC)
5710 {
5711 /* ADRP, followed by ADD. */
5712 *cost += COSTS_N_INSNS (1);
5713 if (speed)
5714 *cost += 2 * extra_cost->alu.arith;
5715 }
5716 else if (aarch64_cmodel == AARCH64_CMODEL_TINY
5717 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
5718 {
5719 /* ADR. */
5720 if (speed)
5721 *cost += extra_cost->alu.arith;
5722 }
5723
5724 if (flag_pic)
5725 {
5726 /* One extra load instruction, after accessing the GOT. */
5727 *cost += COSTS_N_INSNS (1);
5728 if (speed)
5729 *cost += extra_cost->ldst.load;
5730 }
5731 return true;
5732
5733 case HIGH:
5734 case LO_SUM:
5735 /* ADRP/ADD (immediate). */
5736 if (speed)
5737 *cost += extra_cost->alu.arith;
5738 return true;
5739
5740 case ZERO_EXTRACT:
5741 case SIGN_EXTRACT:
5742 /* UBFX/SBFX. */
5743 if (speed)
5744 *cost += extra_cost->alu.bfx;
5745
5746 /* We can trust that the immediates used will be correct (there
5747 are no by-register forms), so we need only cost op0. */
5748 *cost += rtx_cost (XEXP (x, 0), (enum rtx_code) code, 0, speed);
5749 return true;
5750
5751 case MULT:
5752 *cost += aarch64_rtx_mult_cost (x, MULT, 0, speed);
5753 /* aarch64_rtx_mult_cost always handles recursion to its
5754 operands. */
5755 return true;
5756
5757 case MOD:
5758 case UMOD:
5759 if (speed)
5760 {
5761 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
5762 *cost += (extra_cost->mult[GET_MODE (x) == DImode].add
5763 + extra_cost->mult[GET_MODE (x) == DImode].idiv);
5764 else if (GET_MODE (x) == DFmode)
5765 *cost += (extra_cost->fp[1].mult
5766 + extra_cost->fp[1].div);
5767 else if (GET_MODE (x) == SFmode)
5768 *cost += (extra_cost->fp[0].mult
5769 + extra_cost->fp[0].div);
5770 }
5771 return false; /* All arguments need to be in registers. */
5772
5773 case DIV:
5774 case UDIV:
5775 case SQRT:
5776 if (speed)
5777 {
5778 if (GET_MODE_CLASS (mode) == MODE_INT)
5779 /* There is no integer SQRT, so only DIV and UDIV can get
5780 here. */
5781 *cost += extra_cost->mult[mode == DImode].idiv;
5782 else
5783 *cost += extra_cost->fp[mode == DFmode].div;
5784 }
5785 return false; /* All arguments need to be in registers. */
5786
5787 case IF_THEN_ELSE:
5788 return aarch64_if_then_else_costs (XEXP (x, 0), XEXP (x, 1),
5789 XEXP (x, 2), cost, speed);
5790
5791 case EQ:
5792 case NE:
5793 case GT:
5794 case GTU:
5795 case LT:
5796 case LTU:
5797 case GE:
5798 case GEU:
5799 case LE:
5800 case LEU:
5801
5802 return false; /* All arguments must be in registers. */
5803
5804 case FMA:
5805 op0 = XEXP (x, 0);
5806 op1 = XEXP (x, 1);
5807 op2 = XEXP (x, 2);
5808
5809 if (speed)
5810 *cost += extra_cost->fp[mode == DFmode].fma;
5811
5812 /* FMSUB, FNMADD, and FNMSUB are free. */
5813 if (GET_CODE (op0) == NEG)
5814 op0 = XEXP (op0, 0);
5815
5816 if (GET_CODE (op2) == NEG)
5817 op2 = XEXP (op2, 0);
5818
5819 /* aarch64_fnma4_elt_to_64v2df has the NEG as operand 1,
5820 and the by-element operand as operand 0. */
5821 if (GET_CODE (op1) == NEG)
5822 op1 = XEXP (op1, 0);
5823
5824 /* Catch vector-by-element operations. The by-element operand can
5825 either be (vec_duplicate (vec_select (x))) or just
5826 (vec_select (x)), depending on whether we are multiplying by
5827 a vector or a scalar.
5828
5829 Canonicalization is not very good in these cases: FMA4 will put the
5830 by-element operand as operand 0, FNMA4 will have it as operand 1. */
5831 if (GET_CODE (op0) == VEC_DUPLICATE)
5832 op0 = XEXP (op0, 0);
5833 else if (GET_CODE (op1) == VEC_DUPLICATE)
5834 op1 = XEXP (op1, 0);
5835
5836 if (GET_CODE (op0) == VEC_SELECT)
5837 op0 = XEXP (op0, 0);
5838 else if (GET_CODE (op1) == VEC_SELECT)
5839 op1 = XEXP (op1, 0);
5840
5841 /* If the remaining parameters are not registers,
5842 get the cost to put them into registers. */
5843 *cost += rtx_cost (op0, FMA, 0, speed);
5844 *cost += rtx_cost (op1, FMA, 1, speed);
5845 *cost += rtx_cost (op2, FMA, 2, speed);
5846 return true;
5847
5848 case FLOAT_EXTEND:
5849 if (speed)
5850 *cost += extra_cost->fp[mode == DFmode].widen;
5851 return false;
5852
5853 case FLOAT_TRUNCATE:
5854 if (speed)
5855 *cost += extra_cost->fp[mode == DFmode].narrow;
5856 return false;
5857
5858 case FIX:
5859 case UNSIGNED_FIX:
5860 x = XEXP (x, 0);
5861 /* Strip the rounding part. They will all be implemented
5862 by the fcvt* family of instructions anyway. */
5863 if (GET_CODE (x) == UNSPEC)
5864 {
5865 unsigned int uns_code = XINT (x, 1);
5866
5867 if (uns_code == UNSPEC_FRINTA
5868 || uns_code == UNSPEC_FRINTM
5869 || uns_code == UNSPEC_FRINTN
5870 || uns_code == UNSPEC_FRINTP
5871 || uns_code == UNSPEC_FRINTZ)
5872 x = XVECEXP (x, 0, 0);
5873 }
5874
5875 if (speed)
5876 *cost += extra_cost->fp[GET_MODE (x) == DFmode].toint;
5877
5878 *cost += rtx_cost (x, (enum rtx_code) code, 0, speed);
5879 return true;
5880
5881 case ABS:
5882 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5883 {
5884 /* FABS and FNEG are analogous. */
5885 if (speed)
5886 *cost += extra_cost->fp[mode == DFmode].neg;
5887 }
5888 else
5889 {
5890 /* Integer ABS will either be split into
5891 two arithmetic instructions, or will be an ABS
5892 (scalar), which we don't model. */
5893 *cost = COSTS_N_INSNS (2);
5894 if (speed)
5895 *cost += 2 * extra_cost->alu.arith;
5896 }
5897 return false;
5898
5899 case SMAX:
5900 case SMIN:
5901 if (speed)
5902 {
5903 /* FMAXNM/FMINNM/FMAX/FMIN.
5904 TODO: This may not be accurate for all implementations, but
5905 we do not model this in the cost tables. */
5906 *cost += extra_cost->fp[mode == DFmode].addsub;
5907 }
5908 return false;
5909
5910 case UNSPEC:
5911 /* The floating point round to integer frint* instructions. */
5912 if (aarch64_frint_unspec_p (XINT (x, 1)))
5913 {
5914 if (speed)
5915 *cost += extra_cost->fp[mode == DFmode].roundint;
5916
5917 return false;
5918 }
5919
5920 if (XINT (x, 1) == UNSPEC_RBIT)
5921 {
5922 if (speed)
5923 *cost += extra_cost->alu.rev;
5924
5925 return false;
5926 }
5927 break;
5928
5929 case TRUNCATE:
5930
5931 /* Decompose <su>muldi3_highpart. */
5932 if (/* (truncate:DI */
5933 mode == DImode
5934 /* (lshiftrt:TI */
5935 && GET_MODE (XEXP (x, 0)) == TImode
5936 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
5937 /* (mult:TI */
5938 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5939 /* (ANY_EXTEND:TI (reg:DI))
5940 (ANY_EXTEND:TI (reg:DI))) */
5941 && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
5942 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == ZERO_EXTEND)
5943 || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
5944 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND))
5945 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0)) == DImode
5946 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0)) == DImode
5947 /* (const_int 64) */
5948 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5949 && UINTVAL (XEXP (XEXP (x, 0), 1)) == 64)
5950 {
5951 /* UMULH/SMULH. */
5952 if (speed)
5953 *cost += extra_cost->mult[mode == DImode].extend;
5954 *cost += rtx_cost (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
5955 MULT, 0, speed);
5956 *cost += rtx_cost (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0),
5957 MULT, 1, speed);
5958 return true;
5959 }
5960
5961 /* Fall through. */
5962 default:
5963 break;
5964 }
5965
5966 if (dump_file && (dump_flags & TDF_DETAILS))
5967 fprintf (dump_file,
5968 "\nFailed to cost RTX. Assuming default cost.\n");
5969
5970 return true;
5971 }
5972
5973 /* Wrapper around aarch64_rtx_costs; dumps the partial or total cost
5974 calculated for X. This cost is stored in *COST. Returns true
5975 if the total cost of X was calculated. */
5976 static bool
5977 aarch64_rtx_costs_wrapper (rtx x, int code, int outer,
5978 int param, int *cost, bool speed)
5979 {
5980 bool result = aarch64_rtx_costs (x, code, outer, param, cost, speed);
5981
5982 if (dump_file && (dump_flags & TDF_DETAILS))
5983 {
5984 print_rtl_single (dump_file, x);
5985 fprintf (dump_file, "\n%s cost: %d (%s)\n",
5986 speed ? "Hot" : "Cold",
5987 *cost, result ? "final" : "partial");
5988 }
5989
5990 return result;
5991 }
5992
5993 static int
5994 aarch64_register_move_cost (enum machine_mode mode,
5995 reg_class_t from_i, reg_class_t to_i)
5996 {
5997 enum reg_class from = (enum reg_class) from_i;
5998 enum reg_class to = (enum reg_class) to_i;
5999 const struct cpu_regmove_cost *regmove_cost
6000 = aarch64_tune_params->regmove_cost;
6001
6002 /* Moving between GPRs and the stack register costs the same as GP2GP. */
6003 if ((from == GENERAL_REGS && to == STACK_REG)
6004 || (to == GENERAL_REGS && from == STACK_REG))
6005 return regmove_cost->GP2GP;
6006
6007 /* To/From the stack register, we move via the gprs. */
6008 if (to == STACK_REG || from == STACK_REG)
6009 return aarch64_register_move_cost (mode, from, GENERAL_REGS)
6010 + aarch64_register_move_cost (mode, GENERAL_REGS, to);
6011
6012 if (from == GENERAL_REGS && to == GENERAL_REGS)
6013 return regmove_cost->GP2GP;
6014 else if (from == GENERAL_REGS)
6015 return regmove_cost->GP2FP;
6016 else if (to == GENERAL_REGS)
6017 return regmove_cost->FP2GP;
6018
6019 /* When AdvSIMD instructions are disabled it is not possible to move
6020 a 128-bit value directly between Q registers. This is handled in
6021 secondary reload. A general register is used as a scratch to move
6022 the upper DI value and the lower DI value is moved directly,
6023 hence the cost is the sum of three moves. */
6024 if (! TARGET_SIMD && GET_MODE_SIZE (mode) == 16)
6025 return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
6026
6027 return regmove_cost->FP2FP;
6028 }
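/* As a worked example (the numbers are purely illustrative, not taken from
   any real tuning table): with regmove_cost = { GP2GP = 1, GP2FP = 5,
   FP2GP = 5, FP2FP = 2 }, a GENERAL_REGS to FP_REGS move costs 5, a
   STACK_REG to FP_REGS move costs GP2GP + GP2FP = 6, and a 128-bit
   FP_REGS to FP_REGS move without AdvSIMD costs
   GP2FP + FP2GP + FP2FP = 12. */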
6029
6030 static int
6031 aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
6032 reg_class_t rclass ATTRIBUTE_UNUSED,
6033 bool in ATTRIBUTE_UNUSED)
6034 {
6035 return aarch64_tune_params->memmov_cost;
6036 }
6037
6038 /* Return the number of instructions that can be issued per cycle. */
6039 static int
6040 aarch64_sched_issue_rate (void)
6041 {
6042 return aarch64_tune_params->issue_rate;
6043 }
6044
6045 /* Vectorizer cost model target hooks. */
6046
6047 /* Implement targetm.vectorize.builtin_vectorization_cost. */
6048 static int
6049 aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
6050 tree vectype,
6051 int misalign ATTRIBUTE_UNUSED)
6052 {
6053 unsigned elements;
6054
6055 switch (type_of_cost)
6056 {
6057 case scalar_stmt:
6058 return aarch64_tune_params->vec_costs->scalar_stmt_cost;
6059
6060 case scalar_load:
6061 return aarch64_tune_params->vec_costs->scalar_load_cost;
6062
6063 case scalar_store:
6064 return aarch64_tune_params->vec_costs->scalar_store_cost;
6065
6066 case vector_stmt:
6067 return aarch64_tune_params->vec_costs->vec_stmt_cost;
6068
6069 case vector_load:
6070 return aarch64_tune_params->vec_costs->vec_align_load_cost;
6071
6072 case vector_store:
6073 return aarch64_tune_params->vec_costs->vec_store_cost;
6074
6075 case vec_to_scalar:
6076 return aarch64_tune_params->vec_costs->vec_to_scalar_cost;
6077
6078 case scalar_to_vec:
6079 return aarch64_tune_params->vec_costs->scalar_to_vec_cost;
6080
6081 case unaligned_load:
6082 return aarch64_tune_params->vec_costs->vec_unalign_load_cost;
6083
6084 case unaligned_store:
6085 return aarch64_tune_params->vec_costs->vec_unalign_store_cost;
6086
6087 case cond_branch_taken:
6088 return aarch64_tune_params->vec_costs->cond_taken_branch_cost;
6089
6090 case cond_branch_not_taken:
6091 return aarch64_tune_params->vec_costs->cond_not_taken_branch_cost;
6092
6093 case vec_perm:
6094 case vec_promote_demote:
6095 return aarch64_tune_params->vec_costs->vec_stmt_cost;
6096
6097 case vec_construct:
6098 elements = TYPE_VECTOR_SUBPARTS (vectype);
6099 return elements / 2 + 1;
6100
6101 default:
6102 gcc_unreachable ();
6103 }
6104 }
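/* For example, under the vec_construct rule above a V4SF vector built from
   four scalars is costed at 4 / 2 + 1 = 3 and a V16QI vector built from
   sixteen scalars at 16 / 2 + 1 = 9; every other entry comes straight from
   the selected CPU's vector cost table. */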
6105
6106 /* Implement targetm.vectorize.add_stmt_cost. */
6107 static unsigned
6108 aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
6109 struct _stmt_vec_info *stmt_info, int misalign,
6110 enum vect_cost_model_location where)
6111 {
6112 unsigned *cost = (unsigned *) data;
6113 unsigned retval = 0;
6114
6115 if (flag_vect_cost_model)
6116 {
6117 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
6118 int stmt_cost =
6119 aarch64_builtin_vectorization_cost (kind, vectype, misalign);
6120
6121 /* Statements in an inner loop relative to the loop being
6122 vectorized are weighted more heavily. The value here is
6123 a function (linear for now) of the loop nest level. */
6124 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
6125 {
6126 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
6127 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
6128 unsigned nest_level = loop_depth (loop);
6129
6130 count *= nest_level;
6131 }
6132
6133 retval = (unsigned) (count * stmt_cost);
6134 cost[where] += retval;
6135 }
6136
6137 return retval;
6138 }
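/* For example, a statement of cost 1 counted once in an inner loop whose
   loop_depth is 2 is weighted to 2 * 1 = 2 in the vect_body bucket rather
   than 1. */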
6139
6140 static void initialize_aarch64_code_model (void);
6141
6142 /* Parse the architecture extension string. */
6143
6144 static void
6145 aarch64_parse_extension (char *str)
6146 {
6147 /* The extension string is parsed left to right. */
6148 const struct aarch64_option_extension *opt = NULL;
6149
6150 /* Flag to say whether we are adding or removing an extension. */
6151 int adding_ext = -1;
6152
6153 while (str != NULL && *str != 0)
6154 {
6155 char *ext;
6156 size_t len;
6157
6158 str++;
6159 ext = strchr (str, '+');
6160
6161 if (ext != NULL)
6162 len = ext - str;
6163 else
6164 len = strlen (str);
6165
6166 if (len >= 2 && strncmp (str, "no", 2) == 0)
6167 {
6168 adding_ext = 0;
6169 len -= 2;
6170 str += 2;
6171 }
6172 else if (len > 0)
6173 adding_ext = 1;
6174
6175 if (len == 0)
6176 {
6177 error ("missing feature modifier after %qs", "+no");
6178 return;
6179 }
6180
6181 /* Scan over the extensions table trying to find an exact match. */
6182 for (opt = all_extensions; opt->name != NULL; opt++)
6183 {
6184 if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
6185 {
6186 /* Add or remove the extension. */
6187 if (adding_ext)
6188 aarch64_isa_flags |= opt->flags_on;
6189 else
6190 aarch64_isa_flags &= ~(opt->flags_off);
6191 break;
6192 }
6193 }
6194
6195 if (opt->name == NULL)
6196 {
6197 /* Extension not found in list. */
6198 error ("unknown feature modifier %qs", str);
6199 return;
6200 }
6201
6202 str = ext;
6203 }
6204
6205 return;
6206 }
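/* As a worked example, assuming an extension table that contains entries
   named "crypto" and "fp" (the exact set lives in the target's option
   extension list): for the string "+crypto+nofp" the loop above first
   matches "crypto" and ORs its flags_on bits into aarch64_isa_flags, then
   matches "nofp", strips the "no" prefix and clears the flags_off bits of
   "fp". An unknown name such as "+foo" is diagnosed with "unknown feature
   modifier" and parsing stops. */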
6207
6208 /* Parse the ARCH string. */
6209
6210 static void
6211 aarch64_parse_arch (void)
6212 {
6213 char *ext;
6214 const struct processor *arch;
6215 char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
6216 size_t len;
6217
6218 strcpy (str, aarch64_arch_string);
6219
6220 ext = strchr (str, '+');
6221
6222 if (ext != NULL)
6223 len = ext - str;
6224 else
6225 len = strlen (str);
6226
6227 if (len == 0)
6228 {
6229 error ("missing arch name in -march=%qs", str);
6230 return;
6231 }
6232
6233 /* Loop through the list of supported ARCHs to find a match. */
6234 for (arch = all_architectures; arch->name != NULL; arch++)
6235 {
6236 if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
6237 {
6238 selected_arch = arch;
6239 aarch64_isa_flags = selected_arch->flags;
6240
6241 if (!selected_cpu)
6242 selected_cpu = &all_cores[selected_arch->core];
6243
6244 if (ext != NULL)
6245 {
6246 /* ARCH string contains at least one extension. */
6247 aarch64_parse_extension (ext);
6248 }
6249
6250 if (strcmp (selected_arch->arch, selected_cpu->arch))
6251 {
6252 warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
6253 selected_cpu->name, selected_arch->name);
6254 }
6255
6256 return;
6257 }
6258 }
6259
6260 /* ARCH name not found in list. */
6261 error ("unknown value %qs for -march", str);
6262 return;
6263 }
6264
6265 /* Parse the CPU string. */
6266
6267 static void
6268 aarch64_parse_cpu (void)
6269 {
6270 char *ext;
6271 const struct processor *cpu;
6272 char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
6273 size_t len;
6274
6275 strcpy (str, aarch64_cpu_string);
6276
6277 ext = strchr (str, '+');
6278
6279 if (ext != NULL)
6280 len = ext - str;
6281 else
6282 len = strlen (str);
6283
6284 if (len == 0)
6285 {
6286 error ("missing cpu name in -mcpu=%qs", str);
6287 return;
6288 }
6289
6290 /* Loop through the list of supported CPUs to find a match. */
6291 for (cpu = all_cores; cpu->name != NULL; cpu++)
6292 {
6293 if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
6294 {
6295 selected_cpu = cpu;
6296 selected_tune = cpu;
6297 aarch64_isa_flags = selected_cpu->flags;
6298
6299 if (ext != NULL)
6300 {
6301 /* CPU string contains at least one extension. */
6302 aarch64_parse_extension (ext);
6303 }
6304
6305 return;
6306 }
6307 }
6308
6309 /* CPU name not found in list. */
6310 error ("unknown value %qs for -mcpu", str);
6311 return;
6312 }
6313
6314 /* Parse the TUNE string. */
6315
6316 static void
6317 aarch64_parse_tune (void)
6318 {
6319 const struct processor *cpu;
6320 char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
6321 strcpy (str, aarch64_tune_string);
6322
6323 /* Loop through the list of supported CPUs to find a match. */
6324 for (cpu = all_cores; cpu->name != NULL; cpu++)
6325 {
6326 if (strcmp (cpu->name, str) == 0)
6327 {
6328 selected_tune = cpu;
6329 return;
6330 }
6331 }
6332
6333 /* CPU name not found in list. */
6334 error ("unknown value %qs for -mtune", str);
6335 return;
6336 }
6337
6338
6339 /* Implement TARGET_OPTION_OVERRIDE. */
6340
6341 static void
6342 aarch64_override_options (void)
6343 {
6344 /* -mcpu=CPU is shorthand for -march=ARCH_FOR_CPU, -mtune=CPU.
6345 If either of -march or -mtune is given, they override their
6346 respective component of -mcpu.
6347
6348 So, first parse AARCH64_CPU_STRING, then the others. Be careful
6349 with -march: if -mcpu is not present on the command line, -march
6350 must set a sensible default CPU. */
6351 if (aarch64_cpu_string)
6352 {
6353 aarch64_parse_cpu ();
6354 }
6355
6356 if (aarch64_arch_string)
6357 {
6358 aarch64_parse_arch ();
6359 }
6360
6361 if (aarch64_tune_string)
6362 {
6363 aarch64_parse_tune ();
6364 }
6365
6366 #ifndef HAVE_AS_MABI_OPTION
6367 /* The compiler may have been configured with 2.23.* binutils, which does
6368 not have support for ILP32. */
6369 if (TARGET_ILP32)
6370 error ("Assembler does not support -mabi=ilp32");
6371 #endif
6372
6373 initialize_aarch64_code_model ();
6374
6375 aarch64_build_bitmask_table ();
6376
6377 /* This target defaults to strict volatile bitfields. */
6378 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
6379 flag_strict_volatile_bitfields = 1;
6380
6381 /* If the user did not specify a processor, choose the default
6382 one for them. This will be the CPU set during configuration using
6383 --with-cpu, otherwise it is "generic". */
6384 if (!selected_cpu)
6385 {
6386 selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
6387 aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
6388 }
6389
6390 gcc_assert (selected_cpu);
6391
6392 /* The selected cpu may be an architecture, so lookup tuning by core ID. */
6393 if (!selected_tune)
6394 selected_tune = &all_cores[selected_cpu->core];
6395
6396 aarch64_tune_flags = selected_tune->flags;
6397 aarch64_tune = selected_tune->core;
6398 aarch64_tune_params = selected_tune->tune;
6399
6400 aarch64_override_options_after_change ();
6401 }
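/* A worked example of the precedence rules described above (the core names
   are only illustrative): "-mcpu=cortex-a57" selects CPU, architecture and
   tuning in one go; "-mcpu=cortex-a57 -mtune=cortex-a53" keeps the
   Cortex-A57 architecture and ISA flags but tunes for Cortex-A53; and
   "-mcpu=cortex-a57 -march=armv8-a+crc" lets -march override the
   architecture/ISA component of -mcpu, with a warning if the two
   architectures disagree. */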
6402
6403 /* Implement targetm.override_options_after_change. */
6404
6405 static void
6406 aarch64_override_options_after_change (void)
6407 {
6408 if (flag_omit_frame_pointer)
6409 flag_omit_leaf_frame_pointer = false;
6410 else if (flag_omit_leaf_frame_pointer)
6411 flag_omit_frame_pointer = true;
6412 }
6413
6414 static struct machine_function *
6415 aarch64_init_machine_status (void)
6416 {
6417 struct machine_function *machine;
6418 machine = ggc_cleared_alloc<machine_function> ();
6419 return machine;
6420 }
6421
6422 void
6423 aarch64_init_expanders (void)
6424 {
6425 init_machine_status = aarch64_init_machine_status;
6426 }
6427
6428 /* Resolve the code model to use, taking PIC into account, and reject combinations that are not implemented. */
6429 static void
6430 initialize_aarch64_code_model (void)
6431 {
6432 if (flag_pic)
6433 {
6434 switch (aarch64_cmodel_var)
6435 {
6436 case AARCH64_CMODEL_TINY:
6437 aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
6438 break;
6439 case AARCH64_CMODEL_SMALL:
6440 aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
6441 break;
6442 case AARCH64_CMODEL_LARGE:
6443 sorry ("code model %qs with -f%s", "large",
6444 flag_pic > 1 ? "PIC" : "pic");
break;
6445 default:
6446 gcc_unreachable ();
6447 }
6448 }
6449 else
6450 aarch64_cmodel = aarch64_cmodel_var;
6451 }
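/* For example, "-mcmodel=small -fPIC" resolves to AARCH64_CMODEL_SMALL_PIC
   and "-mcmodel=tiny -fpic" to AARCH64_CMODEL_TINY_PIC, while
   "-mcmodel=large -fpic" is rejected with a "sorry" because PIC code
   generation for the large model is not implemented. */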
6452
6453 /* Return true if SYMBOL_REF X binds locally. */
6454
6455 static bool
6456 aarch64_symbol_binds_local_p (const_rtx x)
6457 {
6458 return (SYMBOL_REF_DECL (x)
6459 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
6460 : SYMBOL_REF_LOCAL_P (x));
6461 }
6462
6463 /* Return true if SYMBOL_REF X is thread-local. */
6464 static bool
6465 aarch64_tls_symbol_p (rtx x)
6466 {
6467 if (! TARGET_HAVE_TLS)
6468 return false;
6469
6470 if (GET_CODE (x) != SYMBOL_REF)
6471 return false;
6472
6473 return SYMBOL_REF_TLS_MODEL (x) != 0;
6474 }
6475
6476 /* Classify a TLS symbol into one of the TLS kinds. */
6477 enum aarch64_symbol_type
6478 aarch64_classify_tls_symbol (rtx x)
6479 {
6480 enum tls_model tls_kind = tls_symbolic_operand_type (x);
6481
6482 switch (tls_kind)
6483 {
6484 case TLS_MODEL_GLOBAL_DYNAMIC:
6485 case TLS_MODEL_LOCAL_DYNAMIC:
6486 return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
6487
6488 case TLS_MODEL_INITIAL_EXEC:
6489 return SYMBOL_SMALL_GOTTPREL;
6490
6491 case TLS_MODEL_LOCAL_EXEC:
6492 return SYMBOL_SMALL_TPREL;
6493
6494 case TLS_MODEL_EMULATED:
6495 case TLS_MODEL_NONE:
6496 return SYMBOL_FORCE_TO_MEM;
6497
6498 default:
6499 gcc_unreachable ();
6500 }
6501 }
6502
6503 /* Return the method that should be used to access SYMBOL_REF or
6504 LABEL_REF X in context CONTEXT. */
6505
6506 enum aarch64_symbol_type
6507 aarch64_classify_symbol (rtx x,
6508 enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
6509 {
6510 if (GET_CODE (x) == LABEL_REF)
6511 {
6512 switch (aarch64_cmodel)
6513 {
6514 case AARCH64_CMODEL_LARGE:
6515 return SYMBOL_FORCE_TO_MEM;
6516
6517 case AARCH64_CMODEL_TINY_PIC:
6518 case AARCH64_CMODEL_TINY:
6519 return SYMBOL_TINY_ABSOLUTE;
6520
6521 case AARCH64_CMODEL_SMALL_PIC:
6522 case AARCH64_CMODEL_SMALL:
6523 return SYMBOL_SMALL_ABSOLUTE;
6524
6525 default:
6526 gcc_unreachable ();
6527 }
6528 }
6529
6530 if (GET_CODE (x) == SYMBOL_REF)
6531 {
6532 if (aarch64_cmodel == AARCH64_CMODEL_LARGE)
6533 return SYMBOL_FORCE_TO_MEM;
6534
6535 if (aarch64_tls_symbol_p (x))
6536 return aarch64_classify_tls_symbol (x);
6537
6538 switch (aarch64_cmodel)
6539 {
6540 case AARCH64_CMODEL_TINY:
6541 if (SYMBOL_REF_WEAK (x))
6542 return SYMBOL_FORCE_TO_MEM;
6543 return SYMBOL_TINY_ABSOLUTE;
6544
6545 case AARCH64_CMODEL_SMALL:
6546 if (SYMBOL_REF_WEAK (x))
6547 return SYMBOL_FORCE_TO_MEM;
6548 return SYMBOL_SMALL_ABSOLUTE;
6549
6550 case AARCH64_CMODEL_TINY_PIC:
6551 if (!aarch64_symbol_binds_local_p (x))
6552 return SYMBOL_TINY_GOT;
6553 return SYMBOL_TINY_ABSOLUTE;
6554
6555 case AARCH64_CMODEL_SMALL_PIC:
6556 if (!aarch64_symbol_binds_local_p (x))
6557 return SYMBOL_SMALL_GOT;
6558 return SYMBOL_SMALL_ABSOLUTE;
6559
6560 default:
6561 gcc_unreachable ();
6562 }
6563 }
6564
6565 /* By default push everything into the constant pool. */
6566 return SYMBOL_FORCE_TO_MEM;
6567 }
6568
6569 bool
6570 aarch64_constant_address_p (rtx x)
6571 {
6572 return (CONSTANT_P (x) && memory_address_p (DImode, x));
6573 }
6574
6575 bool
6576 aarch64_legitimate_pic_operand_p (rtx x)
6577 {
6578 if (GET_CODE (x) == SYMBOL_REF
6579 || (GET_CODE (x) == CONST
6580 && GET_CODE (XEXP (x, 0)) == PLUS
6581 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
6582 return false;
6583
6584 return true;
6585 }
6586
6587 /* Return true if X holds a floating-point constant that is either +0.0
6588 or representable as a quarter-precision (FMOV) immediate. */
6589 static bool
6590 aarch64_valid_floating_const (enum machine_mode mode, rtx x)
6591 {
6592 if (!CONST_DOUBLE_P (x))
6593 return false;
6594
6595 /* TODO: We could handle moving 0.0 to a TFmode register,
6596 but first we would like to refactor the movtf_aarch64
6597 pattern to be more amenable to splitting moves properly and
6598 to gate correctly on TARGET_SIMD. For now, reject all
6599 constants that are not destined for SFmode or DFmode registers. */
6600 if (!(mode == SFmode || mode == DFmode))
6601 return false;
6602
6603 if (aarch64_float_const_zero_rtx_p (x))
6604 return true;
6605 return aarch64_float_const_representable_p (x);
6606 }
6607
6608 static bool
6609 aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
6610 {
6611 /* Do not allow vector struct mode constants. We could support
6612 0 and -1 easily, but they need support in aarch64-simd.md. */
6613 if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
6614 return false;
6615
6616 /* This could probably go away because
6617 we now decompose CONST_INTs according to expand_mov_immediate. */
6618 if ((GET_CODE (x) == CONST_VECTOR
6619 && aarch64_simd_valid_immediate (x, mode, false, NULL))
6620 || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
6621 return !targetm.cannot_force_const_mem (mode, x);
6622
6623 if (GET_CODE (x) == HIGH
6624 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
6625 return true;
6626
6627 return aarch64_constant_address_p (x);
6628 }
6629
6630 rtx
6631 aarch64_load_tp (rtx target)
6632 {
6633 if (!target
6634 || GET_MODE (target) != Pmode
6635 || !register_operand (target, Pmode))
6636 target = gen_reg_rtx (Pmode);
6637
6638 /* Can return in any reg. */
6639 emit_insn (gen_aarch64_load_tp_hard (target));
6640 return target;
6641 }
6642
6643 /* On AAPCS systems, this is the "struct __va_list". */
6644 static GTY(()) tree va_list_type;
6645
6646 /* Implement TARGET_BUILD_BUILTIN_VA_LIST.
6647 Return the type to use as __builtin_va_list.
6648
6649 AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
6650
6651 struct __va_list
6652 {
6653 void *__stack;
6654 void *__gr_top;
6655 void *__vr_top;
6656 int __gr_offs;
6657 int __vr_offs;
6658 }; */
6659
6660 static tree
6661 aarch64_build_builtin_va_list (void)
6662 {
6663 tree va_list_name;
6664 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
6665
6666 /* Create the type. */
6667 va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
6668 /* Give it the required name. */
6669 va_list_name = build_decl (BUILTINS_LOCATION,
6670 TYPE_DECL,
6671 get_identifier ("__va_list"),
6672 va_list_type);
6673 DECL_ARTIFICIAL (va_list_name) = 1;
6674 TYPE_NAME (va_list_type) = va_list_name;
6675 TYPE_STUB_DECL (va_list_type) = va_list_name;
6676
6677 /* Create the fields. */
6678 f_stack = build_decl (BUILTINS_LOCATION,
6679 FIELD_DECL, get_identifier ("__stack"),
6680 ptr_type_node);
6681 f_grtop = build_decl (BUILTINS_LOCATION,
6682 FIELD_DECL, get_identifier ("__gr_top"),
6683 ptr_type_node);
6684 f_vrtop = build_decl (BUILTINS_LOCATION,
6685 FIELD_DECL, get_identifier ("__vr_top"),
6686 ptr_type_node);
6687 f_groff = build_decl (BUILTINS_LOCATION,
6688 FIELD_DECL, get_identifier ("__gr_offs"),
6689 integer_type_node);
6690 f_vroff = build_decl (BUILTINS_LOCATION,
6691 FIELD_DECL, get_identifier ("__vr_offs"),
6692 integer_type_node);
6693
6694 DECL_ARTIFICIAL (f_stack) = 1;
6695 DECL_ARTIFICIAL (f_grtop) = 1;
6696 DECL_ARTIFICIAL (f_vrtop) = 1;
6697 DECL_ARTIFICIAL (f_groff) = 1;
6698 DECL_ARTIFICIAL (f_vroff) = 1;
6699
6700 DECL_FIELD_CONTEXT (f_stack) = va_list_type;
6701 DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
6702 DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
6703 DECL_FIELD_CONTEXT (f_groff) = va_list_type;
6704 DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
6705
6706 TYPE_FIELDS (va_list_type) = f_stack;
6707 DECL_CHAIN (f_stack) = f_grtop;
6708 DECL_CHAIN (f_grtop) = f_vrtop;
6709 DECL_CHAIN (f_vrtop) = f_groff;
6710 DECL_CHAIN (f_groff) = f_vroff;
6711
6712 /* Compute its layout. */
6713 layout_type (va_list_type);
6714
6715 return va_list_type;
6716 }
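/* A worked example of the resulting layout, assuming the LP64 ABI where
   pointers are 8 bytes and ints are 4 bytes: the five fields fall at
   offsets 0, 8, 16, 24 and 28, and sizeof (__va_list) is 32 with 8-byte
   alignment. The sketch below (under #if 0, so never built) gives an
   equivalent plain C declaration that can be compared against a real
   va_list on an AArch64 LP64 host. */
#if 0
#include <stdarg.h>
#include <stddef.h>

struct sketch_va_list
{
  void *__stack;		/* next stack argument */
  void *__gr_top;		/* end of the GP register save area */
  void *__vr_top;		/* end of the FP/SIMD register save area */
  int __gr_offs;		/* negative offset from __gr_top */
  int __vr_offs;		/* negative offset from __vr_top */
};

/* Both sizes are expected to be 32, and the offset of __vr_offs 28.  */
enum
{
  sketch_va_list_size = sizeof (struct sketch_va_list),
  real_va_list_size = sizeof (va_list),
  sketch_vr_offs_offset = offsetof (struct sketch_va_list, __vr_offs)
};
#endif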
6717
6718 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
6719 static void
6720 aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6721 {
6722 const CUMULATIVE_ARGS *cum;
6723 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
6724 tree stack, grtop, vrtop, groff, vroff;
6725 tree t;
6726 int gr_save_area_size;
6727 int vr_save_area_size;
6728 int vr_offset;
6729
6730 cum = &crtl->args.info;
6731 gr_save_area_size
6732 = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
6733 vr_save_area_size
6734 = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
6735
6736 if (TARGET_GENERAL_REGS_ONLY)
6737 {
6738 if (cum->aapcs_nvrn > 0)
6739 sorry ("%qs and floating point or vector arguments",
6740 "-mgeneral-regs-only");
6741 vr_save_area_size = 0;
6742 }
6743
6744 f_stack = TYPE_FIELDS (va_list_type_node);
6745 f_grtop = DECL_CHAIN (f_stack);
6746 f_vrtop = DECL_CHAIN (f_grtop);
6747 f_groff = DECL_CHAIN (f_vrtop);
6748 f_vroff = DECL_CHAIN (f_groff);
6749
6750 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
6751 NULL_TREE);
6752 grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
6753 NULL_TREE);
6754 vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
6755 NULL_TREE);
6756 groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
6757 NULL_TREE);
6758 vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
6759 NULL_TREE);
6760
6761 /* Emit code to initialize STACK, which points to the next varargs stack
6762 argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
6763 by named arguments. STACK is 8-byte aligned. */
6764 t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
6765 if (cum->aapcs_stack_size > 0)
6766 t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
6767 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
6768 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6769
6770 /* Emit code to initialize GRTOP, the top of the GR save area.
6771 virtual_incoming_args_rtx should have been 16-byte aligned. */
6772 t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
6773 t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
6774 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6775
6776 /* Emit code to initialize VRTOP, the top of the VR save area.
6777 This address is gr_save_area_bytes below GRTOP, rounded
6778 down to the next 16-byte boundary. */
6779 t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
6780 vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
6781 STACK_BOUNDARY / BITS_PER_UNIT);
6782
6783 if (vr_offset)
6784 t = fold_build_pointer_plus_hwi (t, -vr_offset);
6785 t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
6786 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6787
6788 /* Emit code to initialize GROFF, the offset from GRTOP of the
6789 next GPR argument. */
6790 t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
6791 build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
6792 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6793
6794 /* Likewise emit code to initialize VROFF, the offset from VRTOP
6795 of the next VR argument. */
6796 t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
6797 build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
6798 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6799 }
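/* A worked example, assuming the LP64 ABI with eight GP argument registers
   (x0-x7, so UNITS_PER_WORD == 8 and NUM_ARG_REGS == 8) and eight FP/SIMD
   argument registers (v0-v7, UNITS_PER_VREG == 16): for

     void f (int n, ...);

   the single named argument consumes x0, so gr_save_area_size is
   (8 - 1) * 8 == 56 and vr_save_area_size is 8 * 16 == 128. va_start
   therefore sets __gr_offs to -56, __vr_offs to -128, __gr_top to the
   incoming argument pointer, __vr_top to that pointer minus
   AARCH64_ROUND_UP (56, 16) == 64, and __stack to the first stack-passed
   vararg (here the incoming argument pointer itself, since no named
   argument was passed on the stack). */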
6800
6801 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
6802
6803 static tree
6804 aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6805 gimple_seq *post_p ATTRIBUTE_UNUSED)
6806 {
6807 tree addr;
6808 bool indirect_p;
6809 bool is_ha; /* is HFA or HVA. */
6810 bool dw_align; /* double-word align. */
6811 enum machine_mode ag_mode = VOIDmode;
6812 int nregs;
6813 enum machine_mode mode;
6814
6815 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
6816 tree stack, f_top, f_off, off, arg, roundup, on_stack;
6817 HOST_WIDE_INT size, rsize, adjust, align;
6818 tree t, u, cond1, cond2;
6819
6820 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6821 if (indirect_p)
6822 type = build_pointer_type (type);
6823
6824 mode = TYPE_MODE (type);
6825
6826 f_stack = TYPE_FIELDS (va_list_type_node);
6827 f_grtop = DECL_CHAIN (f_stack);
6828 f_vrtop = DECL_CHAIN (f_grtop);
6829 f_groff = DECL_CHAIN (f_vrtop);
6830 f_vroff = DECL_CHAIN (f_groff);
6831
6832 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
6833 f_stack, NULL_TREE);
6834 size = int_size_in_bytes (type);
6835 align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
6836
6837 dw_align = false;
6838 adjust = 0;
6839 if (aarch64_vfp_is_call_or_return_candidate (mode,
6840 type,
6841 &ag_mode,
6842 &nregs,
6843 &is_ha))
6844 {
6845 /* TYPE passed in fp/simd registers. */
6846 if (TARGET_GENERAL_REGS_ONLY)
6847 sorry ("%qs and floating point or vector arguments",
6848 "-mgeneral-regs-only");
6849
6850 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
6851 unshare_expr (valist), f_vrtop, NULL_TREE);
6852 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
6853 unshare_expr (valist), f_vroff, NULL_TREE);
6854
6855 rsize = nregs * UNITS_PER_VREG;
6856
6857 if (is_ha)
6858 {
6859 if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
6860 adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
6861 }
6862 else if (BLOCK_REG_PADDING (mode, type, 1) == downward
6863 && size < UNITS_PER_VREG)
6864 {
6865 adjust = UNITS_PER_VREG - size;
6866 }
6867 }
6868 else
6869 {
6870 /* TYPE passed in general registers. */
6871 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
6872 unshare_expr (valist), f_grtop, NULL_TREE);
6873 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
6874 unshare_expr (valist), f_groff, NULL_TREE);
6875 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6876 nregs = rsize / UNITS_PER_WORD;
6877
6878 if (align > 8)
6879 dw_align = true;
6880
6881 if (BLOCK_REG_PADDING (mode, type, 1) == downward
6882 && size < UNITS_PER_WORD)
6883 {
6884 adjust = UNITS_PER_WORD - size;
6885 }
6886 }
6887
6888 /* Get a local temporary for the field value. */
6889 off = get_initialized_tmp_var (f_off, pre_p, NULL);
6890
6891 /* Emit code to branch if off >= 0. */
6892 t = build2 (GE_EXPR, boolean_type_node, off,
6893 build_int_cst (TREE_TYPE (off), 0));
6894 cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
6895
6896 if (dw_align)
6897 {
6898 /* Emit: offs = (offs + 15) & -16. */
6899 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
6900 build_int_cst (TREE_TYPE (off), 15));
6901 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
6902 build_int_cst (TREE_TYPE (off), -16));
6903 roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
6904 }
6905 else
6906 roundup = NULL;
6907
6908 /* Update ap.__[g|v]r_offs */
6909 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
6910 build_int_cst (TREE_TYPE (off), rsize));
6911 t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
6912
6913 /* String up. */
6914 if (roundup)
6915 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
6916
6917 /* [cond2] if (ap.__[g|v]r_offs > 0) */
6918 u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
6919 build_int_cst (TREE_TYPE (f_off), 0));
6920 cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
6921
6922 /* String up: make sure the assignment happens before the use. */
6923 t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
6924 COND_EXPR_ELSE (cond1) = t;
6925
6926 /* Prepare the trees handling the argument that is passed on the stack;
6927 the top level node will store in ON_STACK. */
6928 arg = get_initialized_tmp_var (stack, pre_p, NULL);
6929 if (align > 8)
6930 {
6931 /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
6932 t = fold_convert (intDI_type_node, arg);
6933 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
6934 build_int_cst (TREE_TYPE (t), 15));
6935 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6936 build_int_cst (TREE_TYPE (t), -16));
6937 t = fold_convert (TREE_TYPE (arg), t);
6938 roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
6939 }
6940 else
6941 roundup = NULL;
6942 /* Advance ap.__stack */
6943 t = fold_convert (intDI_type_node, arg);
6944 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
6945 build_int_cst (TREE_TYPE (t), size + 7));
6946 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6947 build_int_cst (TREE_TYPE (t), -8));
6948 t = fold_convert (TREE_TYPE (arg), t);
6949 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
6950 /* String up roundup and advance. */
6951 if (roundup)
6952 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
6953 /* String up with arg */
6954 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
6955 /* Big-endianness related address adjustment. */
6956 if (BLOCK_REG_PADDING (mode, type, 1) == downward
6957 && size < UNITS_PER_WORD)
6958 {
6959 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
6960 size_int (UNITS_PER_WORD - size));
6961 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
6962 }
6963
6964 COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
6965 COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
6966
6967 /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
6968 t = off;
6969 if (adjust)
6970 t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
6971 build_int_cst (TREE_TYPE (off), adjust));
6972
6973 t = fold_convert (sizetype, t);
6974 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
6975
6976 if (is_ha)
6977 {
6978 /* type ha; // treat as "struct {ftype field[n];}"
6979 ... [computing offs]
6980 for (i = 0; i <nregs; ++i, offs += 16)
6981 ha.field[i] = *((ftype *)(ap.__vr_top + offs));
6982 return ha; */
6983 int i;
6984 tree tmp_ha, field_t, field_ptr_t;
6985
6986 /* Declare a local variable. */
6987 tmp_ha = create_tmp_var_raw (type, "ha");
6988 gimple_add_tmp_var (tmp_ha);
6989
6990 /* Establish the base type. */
6991 switch (ag_mode)
6992 {
6993 case SFmode:
6994 field_t = float_type_node;
6995 field_ptr_t = float_ptr_type_node;
6996 break;
6997 case DFmode:
6998 field_t = double_type_node;
6999 field_ptr_t = double_ptr_type_node;
7000 break;
7001 case TFmode:
7002 field_t = long_double_type_node;
7003 field_ptr_t = long_double_ptr_type_node;
7004 break;
7005 /* Half precision and quad precision are not fully supported yet. Enable
7006 the following code once that support is complete; the correct type
7007 node for __fp16 * still needs to be found. */
7008 #if 0
7009 case HFmode:
7010 field_t = float_type_node;
7011 field_ptr_t = float_ptr_type_node;
7012 break;
7013 #endif
7014 case V2SImode:
7015 case V4SImode:
7016 {
7017 tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
7018 field_t = build_vector_type_for_mode (innertype, ag_mode);
7019 field_ptr_t = build_pointer_type (field_t);
7020 }
7021 break;
7022 default:
7023 gcc_assert (0);
7024 }
7025
7026 /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area) */
7027 tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
7028 addr = t;
7029 t = fold_convert (field_ptr_t, addr);
7030 t = build2 (MODIFY_EXPR, field_t,
7031 build1 (INDIRECT_REF, field_t, tmp_ha),
7032 build1 (INDIRECT_REF, field_t, t));
7033
7034 /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
7035 for (i = 1; i < nregs; ++i)
7036 {
7037 addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
7038 u = fold_convert (field_ptr_t, addr);
7039 u = build2 (MODIFY_EXPR, field_t,
7040 build2 (MEM_REF, field_t, tmp_ha,
7041 build_int_cst (field_ptr_t,
7042 (i *
7043 int_size_in_bytes (field_t)))),
7044 build1 (INDIRECT_REF, field_t, u));
7045 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
7046 }
7047
7048 u = fold_convert (TREE_TYPE (f_top), tmp_ha);
7049 t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
7050 }
7051
7052 COND_EXPR_ELSE (cond2) = t;
7053 addr = fold_convert (build_pointer_type (type), cond1);
7054 addr = build_va_arg_indirect_ref (addr);
7055
7056 if (indirect_p)
7057 addr = build_va_arg_indirect_ref (addr);
7058
7059 return addr;
7060 }
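/* A rough sketch, in C-like pseudo code, of what the trees built above
   amount to for a small integer argument (the FP/SIMD path has the same
   shape but uses __vr_top/__vr_offs, 16-byte units and the HFA expansion):

     off = ap.__gr_offs;
     if (off >= 0)
       goto on_stack;			// register save area already used up
     ap.__gr_offs = off + rsize;
     if (ap.__gr_offs > 0)
       goto on_stack;			// this argument straddles the end
     addr = ap.__gr_top + off;		// read from the register save area
     goto done;
   on_stack:
     addr = ap.__stack;
     ap.__stack = (addr + size + 7) & -8;	// advance past the stack slot
   done:
     result = *(type *) addr;

   The double-word alignment and big-endian adjustments are omitted. */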
7061
7062 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
7063
7064 static void
7065 aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
7066 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7067 int no_rtl)
7068 {
7069 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7070 CUMULATIVE_ARGS local_cum;
7071 int gr_saved, vr_saved;
7072
7073 /* The caller has advanced CUM up to, but not beyond, the last named
7074 argument. Advance a local copy of CUM past the last "real" named
7075 argument, to find out how many registers are left over. */
7076 local_cum = *cum;
7077 aarch64_function_arg_advance (pack_cumulative_args(&local_cum), mode, type, true);
7078
7079 /* Find out how many registers we need to save. */
7080 gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
7081 vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
7082
7083 if (TARGET_GENERAL_REGS_ONLY)
7084 {
7085 if (local_cum.aapcs_nvrn > 0)
7086 sorry ("%qs and floating point or vector arguments",
7087 "-mgeneral-regs-only");
7088 vr_saved = 0;
7089 }
7090
7091 if (!no_rtl)
7092 {
7093 if (gr_saved > 0)
7094 {
7095 rtx ptr, mem;
7096
7097 /* virtual_incoming_args_rtx should have been 16-byte aligned. */
7098 ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
7099 - gr_saved * UNITS_PER_WORD);
7100 mem = gen_frame_mem (BLKmode, ptr);
7101 set_mem_alias_set (mem, get_varargs_alias_set ());
7102
7103 move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
7104 mem, gr_saved);
7105 }
7106 if (vr_saved > 0)
7107 {
7108 /* We can't use move_block_from_reg, because it will use
7109 the wrong mode, storing D regs only. */
7110 enum machine_mode mode = TImode;
7111 int off, i;
7112
7113 /* Set OFF to the offset from virtual_incoming_args_rtx of
7114 the first vector register. The VR save area lies below
7115 the GR one, and is aligned to 16 bytes. */
7116 off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
7117 STACK_BOUNDARY / BITS_PER_UNIT);
7118 off -= vr_saved * UNITS_PER_VREG;
7119
7120 for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
7121 {
7122 rtx ptr, mem;
7123
7124 ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
7125 mem = gen_frame_mem (mode, ptr);
7126 set_mem_alias_set (mem, get_varargs_alias_set ());
7127 aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
7128 off += UNITS_PER_VREG;
7129 }
7130 }
7131 }
7132
7133 /* We don't save the size into *PRETEND_SIZE because we want to avoid
7134 the complications that come from having crtl->args.pretend_args_size changed. */
7135 cfun->machine->frame.saved_varargs_size
7136 = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
7137 STACK_BOUNDARY / BITS_PER_UNIT)
7138 + vr_saved * UNITS_PER_VREG);
7139 }
7140
7141 static void
7142 aarch64_conditional_register_usage (void)
7143 {
7144 int i;
7145 if (!TARGET_FLOAT)
7146 {
7147 for (i = V0_REGNUM; i <= V31_REGNUM; i++)
7148 {
7149 fixed_regs[i] = 1;
7150 call_used_regs[i] = 1;
7151 }
7152 }
7153 }
7154
7155 /* Walk down the type tree of TYPE counting consecutive base elements.
7156 If *MODEP is VOIDmode, then set it to the first valid floating point
7157 type. If a non-floating point type is found, or if a floating point
7158 type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
7159 otherwise return the count in the sub-tree. */
7160 static int
7161 aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
7162 {
7163 enum machine_mode mode;
7164 HOST_WIDE_INT size;
7165
7166 switch (TREE_CODE (type))
7167 {
7168 case REAL_TYPE:
7169 mode = TYPE_MODE (type);
7170 if (mode != DFmode && mode != SFmode && mode != TFmode)
7171 return -1;
7172
7173 if (*modep == VOIDmode)
7174 *modep = mode;
7175
7176 if (*modep == mode)
7177 return 1;
7178
7179 break;
7180
7181 case COMPLEX_TYPE:
7182 mode = TYPE_MODE (TREE_TYPE (type));
7183 if (mode != DFmode && mode != SFmode && mode != TFmode)
7184 return -1;
7185
7186 if (*modep == VOIDmode)
7187 *modep = mode;
7188
7189 if (*modep == mode)
7190 return 2;
7191
7192 break;
7193
7194 case VECTOR_TYPE:
7195 /* Use V2SImode and V4SImode as representatives of all 64-bit
7196 and 128-bit vector types. */
7197 size = int_size_in_bytes (type);
7198 switch (size)
7199 {
7200 case 8:
7201 mode = V2SImode;
7202 break;
7203 case 16:
7204 mode = V4SImode;
7205 break;
7206 default:
7207 return -1;
7208 }
7209
7210 if (*modep == VOIDmode)
7211 *modep = mode;
7212
7213 /* Vector modes are considered to be opaque: two vectors are
7214 equivalent for the purposes of being homogeneous aggregates
7215 if they are the same size. */
7216 if (*modep == mode)
7217 return 1;
7218
7219 break;
7220
7221 case ARRAY_TYPE:
7222 {
7223 int count;
7224 tree index = TYPE_DOMAIN (type);
7225
7226 /* Can't handle incomplete types nor sizes that are not
7227 fixed. */
7228 if (!COMPLETE_TYPE_P (type)
7229 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
7230 return -1;
7231
7232 count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
7233 if (count == -1
7234 || !index
7235 || !TYPE_MAX_VALUE (index)
7236 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
7237 || !TYPE_MIN_VALUE (index)
7238 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
7239 || count < 0)
7240 return -1;
7241
7242 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
7243 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
7244
7245 /* There must be no padding. */
7246 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
7247 return -1;
7248
7249 return count;
7250 }
7251
7252 case RECORD_TYPE:
7253 {
7254 int count = 0;
7255 int sub_count;
7256 tree field;
7257
7258 /* Can't handle incomplete types nor sizes that are not
7259 fixed. */
7260 if (!COMPLETE_TYPE_P (type)
7261 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
7262 return -1;
7263
7264 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7265 {
7266 if (TREE_CODE (field) != FIELD_DECL)
7267 continue;
7268
7269 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
7270 if (sub_count < 0)
7271 return -1;
7272 count += sub_count;
7273 }
7274
7275 /* There must be no padding. */
7276 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
7277 return -1;
7278
7279 return count;
7280 }
7281
7282 case UNION_TYPE:
7283 case QUAL_UNION_TYPE:
7284 {
7285 /* These aren't very interesting except in a degenerate case. */
7286 int count = 0;
7287 int sub_count;
7288 tree field;
7289
7290 /* Can't handle incomplete types nor sizes that are not
7291 fixed. */
7292 if (!COMPLETE_TYPE_P (type)
7293 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
7294 return -1;
7295
7296 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7297 {
7298 if (TREE_CODE (field) != FIELD_DECL)
7299 continue;
7300
7301 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
7302 if (sub_count < 0)
7303 return -1;
7304 count = count > sub_count ? count : sub_count;
7305 }
7306
7307 /* There must be no padding. */
7308 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
7309 return -1;
7310
7311 return count;
7312 }
7313
7314 default:
7315 break;
7316 }
7317
7318 return -1;
7319 }
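/* Some examples of how the walk above classifies types:

     struct { float x, y, z; }		-> 3, *MODEP == SFmode (an HFA candidate)
     _Complex double			-> 2, *MODEP == DFmode
     struct { double d; float f; }	-> -1 (mixed base types)
     struct { double d; long l; }	-> -1 (contains a non-FP field)
     struct { float a[6]; }		-> 6, later rejected by the caller
					   because it exceeds HA_MAX_NUM_FLDS

   The returned count is only meaningful together with *MODEP. */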
7320
7321 /* Return true if we use LRA instead of reload pass. */
7322 static bool
7323 aarch64_lra_p (void)
7324 {
7325 return aarch64_lra_flag;
7326 }
7327
7328 /* Return TRUE if the type, as described by TYPE and MODE, is a composite
7329 type as described in AAPCS64 \S 4.3. This includes aggregate, union and
7330 array types. The C99 floating-point complex types are also considered
7331 as composite types, according to AAPCS64 \S 7.1.1. The complex integer
7332 types, which are GCC extensions and out of the scope of AAPCS64, are
7333 treated as composite types here as well.
7334
7335 Note that MODE itself is not sufficient in determining whether a type
7336 is such a composite type or not. This is because
7337 stor-layout.c:compute_record_mode may have already changed the MODE
7338 (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
7339 structure with only one field may have its MODE set to the mode of the
7340 field. Also an integer mode whose size matches the size of the
7341 RECORD_TYPE type may be used to substitute the original mode
7342 (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
7343 solely relied on. */
7344
7345 static bool
7346 aarch64_composite_type_p (const_tree type,
7347 enum machine_mode mode)
7348 {
7349 if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
7350 return true;
7351
7352 if (mode == BLKmode
7353 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7354 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
7355 return true;
7356
7357 return false;
7358 }
7359
7360 /* Return TRUE if the type, as described by TYPE and MODE, is a short vector
7361 type as described in AAPCS64 \S 4.1.2.
7362
7363 See the comment above aarch64_composite_type_p for the notes on MODE. */
7364
7365 static bool
7366 aarch64_short_vector_p (const_tree type,
7367 enum machine_mode mode)
7368 {
7369 HOST_WIDE_INT size = -1;
7370
7371 if (type && TREE_CODE (type) == VECTOR_TYPE)
7372 size = int_size_in_bytes (type);
7373 else if (!aarch64_composite_type_p (type, mode)
7374 && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7375 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
7376 size = GET_MODE_SIZE (mode);
7377
7378 return size == 8 || size == 16;
7379 }
7380
7381 /* Return TRUE if an argument, whose type is described by TYPE and MODE,
7382 shall be passed or returned in simd/fp register(s) (providing these
7383 parameter passing registers are available).
7384
7385 Upon successful return, *COUNT returns the number of needed registers,
7386 *BASE_MODE returns the mode of the individual register, and when IS_HA
7387 is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
7388 floating-point aggregate or a homogeneous short-vector aggregate. */
7389
7390 static bool
7391 aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
7392 const_tree type,
7393 enum machine_mode *base_mode,
7394 int *count,
7395 bool *is_ha)
7396 {
7397 enum machine_mode new_mode = VOIDmode;
7398 bool composite_p = aarch64_composite_type_p (type, mode);
7399
7400 if (is_ha != NULL) *is_ha = false;
7401
7402 if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
7403 || aarch64_short_vector_p (type, mode))
7404 {
7405 *count = 1;
7406 new_mode = mode;
7407 }
7408 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7409 {
7410 if (is_ha != NULL) *is_ha = true;
7411 *count = 2;
7412 new_mode = GET_MODE_INNER (mode);
7413 }
7414 else if (type && composite_p)
7415 {
7416 int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
7417
7418 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
7419 {
7420 if (is_ha != NULL) *is_ha = true;
7421 *count = ag_count;
7422 }
7423 else
7424 return false;
7425 }
7426 else
7427 return false;
7428
7429 *base_mode = new_mode;
7430 return true;
7431 }
7432
7433 /* Implement TARGET_STRUCT_VALUE_RTX. */
7434
7435 static rtx
7436 aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
7437 int incoming ATTRIBUTE_UNUSED)
7438 {
7439 return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
7440 }
7441
7442 /* Implements target hook vector_mode_supported_p. */
7443 static bool
7444 aarch64_vector_mode_supported_p (enum machine_mode mode)
7445 {
7446 if (TARGET_SIMD
7447 && (mode == V4SImode || mode == V8HImode
7448 || mode == V16QImode || mode == V2DImode
7449 || mode == V2SImode || mode == V4HImode
7450 || mode == V8QImode || mode == V2SFmode
7451 || mode == V4SFmode || mode == V2DFmode
7452 || mode == V1DFmode))
7453 return true;
7454
7455 return false;
7456 }
7457
7458 /* Return appropriate SIMD container
7459 for MODE within a vector of WIDTH bits. */
7460 static enum machine_mode
7461 aarch64_simd_container_mode (enum machine_mode mode, unsigned width)
7462 {
7463 gcc_assert (width == 64 || width == 128);
7464 if (TARGET_SIMD)
7465 {
7466 if (width == 128)
7467 switch (mode)
7468 {
7469 case DFmode:
7470 return V2DFmode;
7471 case SFmode:
7472 return V4SFmode;
7473 case SImode:
7474 return V4SImode;
7475 case HImode:
7476 return V8HImode;
7477 case QImode:
7478 return V16QImode;
7479 case DImode:
7480 return V2DImode;
7481 default:
7482 break;
7483 }
7484 else
7485 switch (mode)
7486 {
7487 case SFmode:
7488 return V2SFmode;
7489 case SImode:
7490 return V2SImode;
7491 case HImode:
7492 return V4HImode;
7493 case QImode:
7494 return V8QImode;
7495 default:
7496 break;
7497 }
7498 }
7499 return word_mode;
7500 }
7501
7502 /* Return 128-bit container as the preferred SIMD mode for MODE. */
7503 static enum machine_mode
7504 aarch64_preferred_simd_mode (enum machine_mode mode)
7505 {
7506 return aarch64_simd_container_mode (mode, 128);
7507 }
7508
7509 /* Return the bitmask of possible vector sizes for the vectorizer
7510 to iterate over. */
7511 static unsigned int
7512 aarch64_autovectorize_vector_sizes (void)
7513 {
7514 return (16 | 8);
7515 }
7516
7517 /* A table to help perform AArch64-specific name mangling for AdvSIMD
7518 vector types in order to conform to the AAPCS64 (see "Procedure
7519 Call Standard for the ARM 64-bit Architecture", Appendix A). To
7520 qualify for emission with the mangled names defined in that document,
7521 a vector type must not only be of the correct mode but also be
7522 composed of AdvSIMD vector element types (e.g.
7523 __builtin_aarch64_simd_qi); these types are registered by
7524 aarch64_init_simd_builtins (). In other words, vector types defined
7525 in other ways, e.g. via the vector_size attribute, will get default
7526 mangled names. */
7527 typedef struct
7528 {
7529 enum machine_mode mode;
7530 const char *element_type_name;
7531 const char *mangled_name;
7532 } aarch64_simd_mangle_map_entry;
7533
7534 static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
7535 /* 64-bit containerized types. */
7536 { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
7537 { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
7538 { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
7539 { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
7540 { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
7541 { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
7542 { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
7543 { DImode, "__builtin_aarch64_simd_di", "11__Int64x1_t" },
7544 { DImode, "__builtin_aarch64_simd_udi", "12__Uint64x1_t" },
7545 { V1DFmode, "__builtin_aarch64_simd_df", "13__Float64x1_t" },
7546 { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
7547 { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
7548 /* 128-bit containerized types. */
7549 { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
7550 { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
7551 { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
7552 { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
7553 { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
7554 { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
7555 { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
7556 { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
7557 { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
7558 { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
7559 { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
7560 { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
7561 { V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
7562 { VOIDmode, NULL, NULL }
7563 };
7564
7565 /* Implement TARGET_MANGLE_TYPE. */
7566
7567 static const char *
7568 aarch64_mangle_type (const_tree type)
7569 {
7570 /* The AArch64 ABI documents say that "__va_list" has to be
7571 mangled as if it is in the "std" namespace. */
7572 if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
7573 return "St9__va_list";
7574
7575 /* Check the mode of the vector type, and the name of the vector
7576 element type, against the table. */
7577 if (TREE_CODE (type) == VECTOR_TYPE)
7578 {
7579 aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
7580
7581 while (pos->mode != VOIDmode)
7582 {
7583 tree elt_type = TREE_TYPE (type);
7584
7585 if (pos->mode == TYPE_MODE (type)
7586 && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
7587 && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
7588 pos->element_type_name))
7589 return pos->mangled_name;
7590
7591 pos++;
7592 }
7593 }
7594
7595 /* Use the default mangling. */
7596 return NULL;
7597 }
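/* For example, with the table above a C++ function taking the arm_neon.h
   type int32x4_t,

     void f (int32x4_t);

   is expected to mangle as _Z1f11__Int32x4_t, because int32x4_t has mode
   V4SImode and is built from the __builtin_aarch64_simd_si element type.
   A user-defined "typedef int v4si __attribute__ ((vector_size (16)))"
   does not match the table and keeps the default vector-type mangling. */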
7598
7599 /* Return the equivalent letter for size. */
7600 static char
7601 sizetochar (int size)
7602 {
7603 switch (size)
7604 {
7605 case 64: return 'd';
7606 case 32: return 's';
7607 case 16: return 'h';
7608 case 8 : return 'b';
7609 default: gcc_unreachable ();
7610 }
7611 }
7612
7613 /* Return true iff X is a uniform vector of floating-point
7614 constants, and the constant can be represented in
7615 quarter-precision form. Note that, as aarch64_float_const_representable_p
7616 rejects both +0.0 and -0.0, we will also reject +0.0 and -0.0. */
7617 static bool
7618 aarch64_vect_float_const_representable_p (rtx x)
7619 {
7620 int i = 0;
7621 REAL_VALUE_TYPE r0, ri;
7622 rtx x0, xi;
7623
7624 if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
7625 return false;
7626
7627 x0 = CONST_VECTOR_ELT (x, 0);
7628 if (!CONST_DOUBLE_P (x0))
7629 return false;
7630
7631 REAL_VALUE_FROM_CONST_DOUBLE (r0, x0);
7632
7633 for (i = 1; i < CONST_VECTOR_NUNITS (x); i++)
7634 {
7635 xi = CONST_VECTOR_ELT (x, i);
7636 if (!CONST_DOUBLE_P (xi))
7637 return false;
7638
7639 REAL_VALUE_FROM_CONST_DOUBLE (ri, xi);
7640 if (!REAL_VALUES_EQUAL (r0, ri))
7641 return false;
7642 }
7643
7644 return aarch64_float_const_representable_p (x0);
7645 }
7646
7647 /* Return true if OP is a valid SIMD immediate for MODE; if INFO is nonnull, describe how to synthesize it. */
7648 bool
7649 aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
7650 struct simd_immediate_info *info)
7651 {
7652 #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
7653 matches = 1; \
7654 for (i = 0; i < idx; i += (STRIDE)) \
7655 if (!(TEST)) \
7656 matches = 0; \
7657 if (matches) \
7658 { \
7659 immtype = (CLASS); \
7660 elsize = (ELSIZE); \
7661 eshift = (SHIFT); \
7662 emvn = (NEG); \
7663 break; \
7664 }
7665
7666 unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
7667 unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
7668 unsigned char bytes[16];
7669 int immtype = -1, matches;
7670 unsigned int invmask = inverse ? 0xff : 0;
7671 int eshift, emvn;
7672
7673 if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7674 {
7675 if (! (aarch64_simd_imm_zero_p (op, mode)
7676 || aarch64_vect_float_const_representable_p (op)))
7677 return false;
7678
7679 if (info)
7680 {
7681 info->value = CONST_VECTOR_ELT (op, 0);
7682 info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value));
7683 info->mvn = false;
7684 info->shift = 0;
7685 }
7686
7687 return true;
7688 }
7689
7690 /* Splat vector constant out into a byte vector. */
7691 for (i = 0; i < n_elts; i++)
7692 {
7693 /* The vector is provided in gcc endian-neutral fashion. For aarch64_be,
7694 it must be laid out in the vector register in reverse order. */
7695 rtx el = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? (n_elts - 1 - i) : i);
7696 unsigned HOST_WIDE_INT elpart;
7697 unsigned int part, parts;
7698
7699 if (CONST_INT_P (el))
7700 {
7701 elpart = INTVAL (el);
7702 parts = 1;
7703 }
7704 else if (GET_CODE (el) == CONST_DOUBLE)
7705 {
7706 elpart = CONST_DOUBLE_LOW (el);
7707 parts = 2;
7708 }
7709 else
7710 gcc_unreachable ();
7711
7712 for (part = 0; part < parts; part++)
7713 {
7714 unsigned int byte;
7715 for (byte = 0; byte < innersize; byte++)
7716 {
7717 bytes[idx++] = (elpart & 0xff) ^ invmask;
7718 elpart >>= BITS_PER_UNIT;
7719 }
7720 if (GET_CODE (el) == CONST_DOUBLE)
7721 elpart = CONST_DOUBLE_HIGH (el);
7722 }
7723 }
7724
7725 /* Sanity check. */
7726 gcc_assert (idx == GET_MODE_SIZE (mode));
7727
7728 do
7729 {
7730 CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
7731 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
7732
7733 CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
7734 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
7735
7736 CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
7737 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
7738
7739 CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
7740 && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
7741
7742 CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
7743
7744 CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
7745
7746 CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
7747 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
7748
7749 CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
7750 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
7751
7752 CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
7753 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
7754
7755 CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
7756 && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
7757
7758 CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
7759
7760 CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
7761
7762 CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
7763 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
7764
7765 CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
7766 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
7767
7768 CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
7769 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
7770
7771 CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
7772 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
7773
7774 CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
7775
7776 CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
7777 && bytes[i] == bytes[(i + 8) % idx], 0, 0);
7778 }
7779 while (0);
7780
7781 if (immtype == -1)
7782 return false;
7783
7784 if (info)
7785 {
7786 info->element_width = elsize;
7787 info->mvn = emvn != 0;
7788 info->shift = eshift;
7789
7790 unsigned HOST_WIDE_INT imm = 0;
7791
7792 if (immtype >= 12 && immtype <= 15)
7793 info->msl = true;
7794
7795 /* Un-invert bytes of recognized vector, if necessary. */
7796 if (invmask != 0)
7797 for (i = 0; i < idx; i++)
7798 bytes[i] ^= invmask;
7799
7800 if (immtype == 17)
7801 {
7802 /* FIXME: Broken on 32-bit H_W_I hosts. */
7803 gcc_assert (sizeof (HOST_WIDE_INT) == 8);
7804
7805 for (i = 0; i < 8; i++)
7806 imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
7807 << (i * BITS_PER_UNIT);
7808
7809
7810 info->value = GEN_INT (imm);
7811 }
7812 else
7813 {
7814 for (i = 0; i < elsize / BITS_PER_UNIT; i++)
7815 imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
7816
7817 /* Construct 'abcdefgh' because the assembler cannot handle
7818 generic constants. */
7819 if (info->mvn)
7820 imm = ~imm;
7821 imm = (imm >> info->shift) & 0xff;
7822 info->value = GEN_INT (imm);
7823 }
7824 }
7825
7826 return true;
7827 #undef CHECK
7828 }
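/* A worked example of the CHECK matching above: the V4SImode constant
   { 0x4500, 0x4500, 0x4500, 0x4500 } splats to the byte pattern
   00 45 00 00 repeated four times, which matches immtype 1 (ELSIZE 32,
   SHIFT 8, no MVN), so INFO ends up describing the value 0x45 shifted
   left by 8, i.e. the operand of "movi v0.4s, #0x45, lsl #8". */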
7829
7830 static bool
7831 aarch64_const_vec_all_same_int_p (rtx x,
7832 HOST_WIDE_INT minval,
7833 HOST_WIDE_INT maxval)
7834 {
7835 HOST_WIDE_INT firstval;
7836 int count, i;
7837
7838 if (GET_CODE (x) != CONST_VECTOR
7839 || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
7840 return false;
7841
7842 firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
7843 if (firstval < minval || firstval > maxval)
7844 return false;
7845
7846 count = CONST_VECTOR_NUNITS (x);
7847 for (i = 1; i < count; i++)
7848 if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
7849 return false;
7850
7851 return true;
7852 }
7853
7854 /* Check that immediate shift constants are within range. */
7855 bool
7856 aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
7857 {
7858 int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
7859 if (left)
7860 return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
7861 else
7862 return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
7863 }
7864
7865 /* Return true if X is a uniform vector where all elements
7866 are either the floating-point constant 0.0 or the
7867 integer constant 0. */
7868 bool
7869 aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
7870 {
7871 return x == CONST0_RTX (mode);
7872 }
7873
7874 bool
7875 aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
7876 {
7877 HOST_WIDE_INT imm = INTVAL (x);
7878 int i;
7879
7880 for (i = 0; i < 8; i++)
7881 {
7882 unsigned int byte = imm & 0xff;
7883 if (byte != 0xff && byte != 0)
7884 return false;
7885 imm >>= 8;
7886 }
7887
7888 return true;
7889 }
7890
7891 bool
7892 aarch64_mov_operand_p (rtx x,
7893 enum aarch64_symbol_context context,
7894 enum machine_mode mode)
7895 {
7896 if (GET_CODE (x) == HIGH
7897 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
7898 return true;
7899
7900 if (CONST_INT_P (x) && aarch64_move_imm (INTVAL (x), mode))
7901 return true;
7902
7903 if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x))
7904 return true;
7905
7906 return aarch64_classify_symbolic_expression (x, context)
7907 == SYMBOL_TINY_ABSOLUTE;
7908 }
7909
7910 /* Return a const_int vector of VAL. */
7911 rtx
7912 aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
7913 {
7914 int nunits = GET_MODE_NUNITS (mode);
7915 rtvec v = rtvec_alloc (nunits);
7916 int i;
7917
7918 for (i = 0; i < nunits; i++)
7919 RTVEC_ELT (v, i) = GEN_INT (val);
7920
7921 return gen_rtx_CONST_VECTOR (mode, v);
7922 }
7923
7924 /* Check OP is a legal scalar immediate for the MOVI instruction. */
7925
7926 bool
7927 aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode)
7928 {
7929 enum machine_mode vmode;
7930
7931 gcc_assert (!VECTOR_MODE_P (mode));
7932 vmode = aarch64_preferred_simd_mode (mode);
7933 rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
7934 return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
7935 }
7936
7937 /* Construct and return a PARALLEL RTX vector with elements numbering the
7938 lanes of either the high (HIGH == TRUE) or low (HIGH == FALSE) half of
7939 the vector - from the perspective of the architecture. This does not
7940 line up with GCC's perspective on lane numbers, so we end up with
7941 different masks depending on our target endianness. The diagram
7942 below may help. We must draw the distinction when building masks
7943 which select one half of the vector. An instruction selecting
7944 architectural low-lanes for a big-endian target must be described using
7945 a mask selecting GCC high-lanes.
7946
7947 Big-Endian Little-Endian
7948
7949 GCC 0 1 2 3 3 2 1 0
7950 | x | x | x | x | | x | x | x | x |
7951 Architecture 3 2 1 0 3 2 1 0
7952
7953 Low Mask: { 2, 3 } { 0, 1 }
7954 High Mask: { 0, 1 } { 2, 3 }
7955 */
7956
7957 rtx
7958 aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
7959 {
7960 int nunits = GET_MODE_NUNITS (mode);
7961 rtvec v = rtvec_alloc (nunits / 2);
7962 int high_base = nunits / 2;
7963 int low_base = 0;
7964 int base;
7965 rtx t1;
7966 int i;
7967
7968 if (BYTES_BIG_ENDIAN)
7969 base = high ? low_base : high_base;
7970 else
7971 base = high ? high_base : low_base;
7972
7973 for (i = 0; i < nunits / 2; i++)
7974 RTVEC_ELT (v, i) = GEN_INT (base + i);
7975
7976 t1 = gen_rtx_PARALLEL (mode, v);
7977 return t1;
7978 }
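/* Worked example for V4SImode (nunits == 4): with HIGH == true this
   returns (parallel [2 3]) on a little-endian target but
   (parallel [0 1]) on a big-endian target, matching the "High Mask"
   row of the diagram above.  */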
7979
7980 /* Check OP for validity as a PARALLEL RTX vector with elements
7981 numbering either the high (HIGH == TRUE) or the low lanes,
7982 from the perspective of the architecture. See the diagram above
7983 aarch64_simd_vect_par_cnst_half for more details. */
7984
7985 bool
7986 aarch64_simd_check_vect_par_cnst_half (rtx op, enum machine_mode mode,
7987 bool high)
7988 {
7989 rtx ideal = aarch64_simd_vect_par_cnst_half (mode, high);
7990 HOST_WIDE_INT count_op = XVECLEN (op, 0);
7991 HOST_WIDE_INT count_ideal = XVECLEN (ideal, 0);
7992 int i = 0;
7993
7994 if (!VECTOR_MODE_P (mode))
7995 return false;
7996
7997 if (count_op != count_ideal)
7998 return false;
7999
8000 for (i = 0; i < count_ideal; i++)
8001 {
8002 rtx elt_op = XVECEXP (op, 0, i);
8003 rtx elt_ideal = XVECEXP (ideal, 0, i);
8004
8005 if (!CONST_INT_P (elt_op)
8006 || INTVAL (elt_ideal) != INTVAL (elt_op))
8007 return false;
8008 }
8009 return true;
8010 }
8011
8012 /* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
8013 HIGH (exclusive). */
8014 void
8015 aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
8016 {
8017 HOST_WIDE_INT lane;
8018 gcc_assert (CONST_INT_P (operand));
8019 lane = INTVAL (operand);
8020
8021 if (lane < low || lane >= high)
8022 error ("lane out of range");
8023 }
8024
8025 void
8026 aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
8027 {
8028 gcc_assert (CONST_INT_P (operand));
8029 HOST_WIDE_INT lane = INTVAL (operand);
8030
8031 if (lane < low || lane >= high)
8032 error ("constant out of range");
8033 }
8034
8035 /* Emit code to reinterpret one AdvSIMD type as another,
8036 without altering bits. */
8037 void
8038 aarch64_simd_reinterpret (rtx dest, rtx src)
8039 {
8040 emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
8041 }
8042
8043 /* Emit code to place an AdvSIMD pair result in memory locations (with equal
8044 registers). */
8045 void
8046 aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
8047 rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
8048 rtx op1)
8049 {
8050 rtx mem = gen_rtx_MEM (mode, destaddr);
8051 rtx tmp1 = gen_reg_rtx (mode);
8052 rtx tmp2 = gen_reg_rtx (mode);
8053
8054 emit_insn (intfn (tmp1, op1, tmp2));
8055
8056 emit_move_insn (mem, tmp1);
8057 mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
8058 emit_move_insn (mem, tmp2);
8059 }
8060
8061 /* Return TRUE if OP is a valid vector addressing mode. */
8062 bool
8063 aarch64_simd_mem_operand_p (rtx op)
8064 {
8065 return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
8066 || REG_P (XEXP (op, 0)));
8067 }
8068
8069 /* Set up OPERANDS for a register copy from SRC to DEST, taking care
8070 not to early-clobber SRC registers in the process.
8071
8072 We assume that the operands described by SRC and DEST represent a
8073 decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
8074 number of components into which the copy has been decomposed. */
8075 void
8076 aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
8077 rtx *src, unsigned int count)
8078 {
8079 unsigned int i;
8080
8081 if (!reg_overlap_mentioned_p (operands[0], operands[1])
8082 || REGNO (operands[0]) < REGNO (operands[1]))
8083 {
8084 for (i = 0; i < count; i++)
8085 {
8086 operands[2 * i] = dest[i];
8087 operands[2 * i + 1] = src[i];
8088 }
8089 }
8090 else
8091 {
8092 for (i = 0; i < count; i++)
8093 {
8094 operands[2 * i] = dest[count - i - 1];
8095 operands[2 * i + 1] = src[count - i - 1];
8096 }
8097 }
8098 }
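/* For instance, when a two-register copy overlaps such that the
   destination starts at a higher register number than the source
   (say V1:V2 copied to V2:V3), the operand pairs are laid out in
   reverse order so that V3 <- V2 is emitted before V2 <- V1 and no
   source register is clobbered before it has been read.  */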
8099
8100 /* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
8101 one of VSTRUCT modes: OI, CI or XI. */
8102 int
8103 aarch64_simd_attr_length_move (rtx insn)
8104 {
8105 enum machine_mode mode;
8106
8107 extract_insn_cached (insn);
8108
8109 if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
8110 {
8111 mode = GET_MODE (recog_data.operand[0]);
8112 switch (mode)
8113 {
8114 case OImode:
8115 return 8;
8116 case CImode:
8117 return 12;
8118 case XImode:
8119 return 16;
8120 default:
8121 gcc_unreachable ();
8122 }
8123 }
8124 return 4;
8125 }
8126
8127 /* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
8128 alignment of a vector to 128 bits. */
8129 static HOST_WIDE_INT
8130 aarch64_simd_vector_alignment (const_tree type)
8131 {
8132 HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
8133 return MIN (align, 128);
8134 }
8135
8136 /* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
8137 static bool
8138 aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
8139 {
8140 if (is_packed)
8141 return false;
8142
8143 /* We guarantee alignment for vectors up to 128 bits. */
8144 if (tree_int_cst_compare (TYPE_SIZE (type),
8145 bitsize_int (BIGGEST_ALIGNMENT)) > 0)
8146 return false;
8147
8148 /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
8149 return true;
8150 }
8151
8152 /* If VALS is a vector constant that can be loaded into a register
8153 using DUP, generate instructions to do so and return an RTX to
8154 assign to the register. Otherwise return NULL_RTX. */
8155 static rtx
8156 aarch64_simd_dup_constant (rtx vals)
8157 {
8158 enum machine_mode mode = GET_MODE (vals);
8159 enum machine_mode inner_mode = GET_MODE_INNER (mode);
8160 int n_elts = GET_MODE_NUNITS (mode);
8161 bool all_same = true;
8162 rtx x;
8163 int i;
8164
8165 if (GET_CODE (vals) != CONST_VECTOR)
8166 return NULL_RTX;
8167
8168 for (i = 1; i < n_elts; ++i)
8169 {
8170 x = CONST_VECTOR_ELT (vals, i);
8171 if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0)))
8172 all_same = false;
8173 }
8174
8175 if (!all_same)
8176 return NULL_RTX;
8177
8178 /* We can load this constant by using DUP and a constant in a
8179 single scalar register. This will be cheaper than a vector
8180 load. */
8181 x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0));
8182 return gen_rtx_VEC_DUPLICATE (mode, x);
8183 }
8184
8185
8186 /* Generate code to load VALS, which is a PARALLEL containing only
8187 constants (for vec_init) or CONST_VECTOR, efficiently into a
8188 register. Returns an RTX to copy into the register, or NULL_RTX
8189 for a PARALLEL that cannot be converted into a CONST_VECTOR. */
8190 static rtx
8191 aarch64_simd_make_constant (rtx vals)
8192 {
8193 enum machine_mode mode = GET_MODE (vals);
8194 rtx const_dup;
8195 rtx const_vec = NULL_RTX;
8196 int n_elts = GET_MODE_NUNITS (mode);
8197 int n_const = 0;
8198 int i;
8199
8200 if (GET_CODE (vals) == CONST_VECTOR)
8201 const_vec = vals;
8202 else if (GET_CODE (vals) == PARALLEL)
8203 {
8204 /* A CONST_VECTOR must contain only CONST_INTs and
8205 CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
8206 Only store valid constants in a CONST_VECTOR. */
8207 for (i = 0; i < n_elts; ++i)
8208 {
8209 rtx x = XVECEXP (vals, 0, i);
8210 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
8211 n_const++;
8212 }
8213 if (n_const == n_elts)
8214 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
8215 }
8216 else
8217 gcc_unreachable ();
8218
8219 if (const_vec != NULL_RTX
8220 && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
8221 /* Load using MOVI/MVNI. */
8222 return const_vec;
8223 else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
8224 /* Loaded using DUP. */
8225 return const_dup;
8226 else if (const_vec != NULL_RTX)
8227 /* Load from constant pool. We cannot take advantage of single-cycle
8228 LD1 because we need a PC-relative addressing mode. */
8229 return const_vec;
8230 else
8231 /* A PARALLEL containing something not valid inside CONST_VECTOR.
8232 We cannot construct an initializer. */
8233 return NULL_RTX;
8234 }
8235
8236 void
8237 aarch64_expand_vector_init (rtx target, rtx vals)
8238 {
8239 enum machine_mode mode = GET_MODE (target);
8240 enum machine_mode inner_mode = GET_MODE_INNER (mode);
8241 int n_elts = GET_MODE_NUNITS (mode);
8242 int n_var = 0, one_var = -1;
8243 bool all_same = true;
8244 rtx x, mem;
8245 int i;
8246
8247 x = XVECEXP (vals, 0, 0);
8248 if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
8249 n_var = 1, one_var = 0;
8250
8251 for (i = 1; i < n_elts; ++i)
8252 {
8253 x = XVECEXP (vals, 0, i);
8254 if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
8255 ++n_var, one_var = i;
8256
8257 if (!rtx_equal_p (x, XVECEXP (vals, 0, 0)))
8258 all_same = false;
8259 }
8260
8261 if (n_var == 0)
8262 {
8263 rtx constant = aarch64_simd_make_constant (vals);
8264 if (constant != NULL_RTX)
8265 {
8266 emit_move_insn (target, constant);
8267 return;
8268 }
8269 }
8270
8271 /* Splat a single non-constant element if we can. */
8272 if (all_same)
8273 {
8274 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
8275 aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
8276 return;
8277 }
8278
8279 /* One field is non-constant. Load constant then overwrite varying
8280 field. This is more efficient than using the stack. */
8281 if (n_var == 1)
8282 {
8283 rtx copy = copy_rtx (vals);
8284 rtx index = GEN_INT (one_var);
8285 enum insn_code icode;
8286
8287 /* Load constant part of vector, substitute neighboring value for
8288 varying element. */
8289 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1);
8290 aarch64_expand_vector_init (target, copy);
8291
8292 /* Insert variable. */
8293 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
8294 icode = optab_handler (vec_set_optab, mode);
8295 gcc_assert (icode != CODE_FOR_nothing);
8296 emit_insn (GEN_FCN (icode) (target, x, index));
8297 return;
8298 }
8299
8300 /* Construct the vector in memory one field at a time
8301 and load the whole vector. */
8302 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
8303 for (i = 0; i < n_elts; i++)
8304 emit_move_insn (adjust_address_nv (mem, inner_mode,
8305 i * GET_MODE_SIZE (inner_mode)),
8306 XVECEXP (vals, 0, i));
8307 emit_move_insn (target, mem);
8308
8309 }
8310
8311 static unsigned HOST_WIDE_INT
8312 aarch64_shift_truncation_mask (enum machine_mode mode)
8313 {
8314 return
8315 (aarch64_vector_mode_supported_p (mode)
8316 || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
8317 }
8318
8319 #ifndef TLS_SECTION_ASM_FLAG
8320 #define TLS_SECTION_ASM_FLAG 'T'
8321 #endif
8322
8323 void
8324 aarch64_elf_asm_named_section (const char *name, unsigned int flags,
8325 tree decl ATTRIBUTE_UNUSED)
8326 {
8327 char flagchars[10], *f = flagchars;
8328
8329 /* If we have already declared this section, we can use an
8330 abbreviated form to switch back to it -- unless this section is
8331 part of a COMDAT group, in which case GAS requires the full
8332 declaration every time. */
8333 if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
8334 && (flags & SECTION_DECLARED))
8335 {
8336 fprintf (asm_out_file, "\t.section\t%s\n", name);
8337 return;
8338 }
8339
8340 if (!(flags & SECTION_DEBUG))
8341 *f++ = 'a';
8342 if (flags & SECTION_WRITE)
8343 *f++ = 'w';
8344 if (flags & SECTION_CODE)
8345 *f++ = 'x';
8346 if (flags & SECTION_SMALL)
8347 *f++ = 's';
8348 if (flags & SECTION_MERGE)
8349 *f++ = 'M';
8350 if (flags & SECTION_STRINGS)
8351 *f++ = 'S';
8352 if (flags & SECTION_TLS)
8353 *f++ = TLS_SECTION_ASM_FLAG;
8354 if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
8355 *f++ = 'G';
8356 *f = '\0';
8357
8358 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
8359
8360 if (!(flags & SECTION_NOTYPE))
8361 {
8362 const char *type;
8363 const char *format;
8364
8365 if (flags & SECTION_BSS)
8366 type = "nobits";
8367 else
8368 type = "progbits";
8369
8370 #ifdef TYPE_OPERAND_FMT
8371 format = "," TYPE_OPERAND_FMT;
8372 #else
8373 format = ",@%s";
8374 #endif
8375
8376 fprintf (asm_out_file, format, type);
8377
8378 if (flags & SECTION_ENTSIZE)
8379 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
8380 if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
8381 {
8382 if (TREE_CODE (decl) == IDENTIFIER_NODE)
8383 fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
8384 else
8385 fprintf (asm_out_file, ",%s,comdat",
8386 IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
8387 }
8388 }
8389
8390 putc ('\n', asm_out_file);
8391 }
8392
8393 /* Select a format to encode pointers in exception handling data. */
8394 int
8395 aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
8396 {
8397 int type;
8398 switch (aarch64_cmodel)
8399 {
8400 case AARCH64_CMODEL_TINY:
8401 case AARCH64_CMODEL_TINY_PIC:
8402 case AARCH64_CMODEL_SMALL:
8403 case AARCH64_CMODEL_SMALL_PIC:
8404 /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
8405 for everything. */
8406 type = DW_EH_PE_sdata4;
8407 break;
8408 default:
8409 /* No assumptions here. 8-byte relocs required. */
8410 type = DW_EH_PE_sdata8;
8411 break;
8412 }
8413 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
8414 }
8415
8416 /* Emit load exclusive. */
8417
8418 static void
8419 aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval,
8420 rtx mem, rtx model_rtx)
8421 {
8422 rtx (*gen) (rtx, rtx, rtx);
8423
8424 switch (mode)
8425 {
8426 case QImode: gen = gen_aarch64_load_exclusiveqi; break;
8427 case HImode: gen = gen_aarch64_load_exclusivehi; break;
8428 case SImode: gen = gen_aarch64_load_exclusivesi; break;
8429 case DImode: gen = gen_aarch64_load_exclusivedi; break;
8430 default:
8431 gcc_unreachable ();
8432 }
8433
8434 emit_insn (gen (rval, mem, model_rtx));
8435 }
8436
8437 /* Emit store exclusive. */
8438
8439 static void
8440 aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval,
8441 rtx rval, rtx mem, rtx model_rtx)
8442 {
8443 rtx (*gen) (rtx, rtx, rtx, rtx);
8444
8445 switch (mode)
8446 {
8447 case QImode: gen = gen_aarch64_store_exclusiveqi; break;
8448 case HImode: gen = gen_aarch64_store_exclusivehi; break;
8449 case SImode: gen = gen_aarch64_store_exclusivesi; break;
8450 case DImode: gen = gen_aarch64_store_exclusivedi; break;
8451 default:
8452 gcc_unreachable ();
8453 }
8454
8455 emit_insn (gen (bval, rval, mem, model_rtx));
8456 }
8457
8458 /* Mark the previous jump instruction as unlikely. */
8459
8460 static void
8461 aarch64_emit_unlikely_jump (rtx insn)
8462 {
8463 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
8464
8465 insn = emit_jump_insn (insn);
8466 add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
8467 }
8468
8469 /* Expand a compare and swap pattern. */
8470
8471 void
8472 aarch64_expand_compare_and_swap (rtx operands[])
8473 {
8474 rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
8475 enum machine_mode mode, cmp_mode;
8476 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
8477
8478 bval = operands[0];
8479 rval = operands[1];
8480 mem = operands[2];
8481 oldval = operands[3];
8482 newval = operands[4];
8483 is_weak = operands[5];
8484 mod_s = operands[6];
8485 mod_f = operands[7];
8486 mode = GET_MODE (mem);
8487 cmp_mode = mode;
8488
8489 /* Normally the succ memory model must be stronger than fail, but in the
8490 unlikely event of fail being ACQUIRE and succ being RELEASE we need to
8491 promote succ to ACQ_REL so that we don't lose the acquire semantics. */
8492
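/* For example, __atomic_compare_exchange_n (p, &expected, desired, 0,
   __ATOMIC_RELEASE, __ATOMIC_ACQUIRE) reaches this point with mod_s ==
   MEMMODEL_RELEASE and mod_f == MEMMODEL_ACQUIRE, and is strengthened
   to MEMMODEL_ACQ_REL below.  */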
8493 if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
8494 && INTVAL (mod_s) == MEMMODEL_RELEASE)
8495 mod_s = GEN_INT (MEMMODEL_ACQ_REL);
8496
8497 switch (mode)
8498 {
8499 case QImode:
8500 case HImode:
8501 /* For short modes, we're going to perform the comparison in SImode,
8502 so do the zero-extension now. */
8503 cmp_mode = SImode;
8504 rval = gen_reg_rtx (SImode);
8505 oldval = convert_modes (SImode, mode, oldval, true);
8506 /* Fall through. */
8507
8508 case SImode:
8509 case DImode:
8510 /* Force the value into a register if needed. */
8511 if (!aarch64_plus_operand (oldval, mode))
8512 oldval = force_reg (cmp_mode, oldval);
8513 break;
8514
8515 default:
8516 gcc_unreachable ();
8517 }
8518
8519 switch (mode)
8520 {
8521 case QImode: gen = gen_atomic_compare_and_swapqi_1; break;
8522 case HImode: gen = gen_atomic_compare_and_swaphi_1; break;
8523 case SImode: gen = gen_atomic_compare_and_swapsi_1; break;
8524 case DImode: gen = gen_atomic_compare_and_swapdi_1; break;
8525 default:
8526 gcc_unreachable ();
8527 }
8528
8529 emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
8530
8531 if (mode == QImode || mode == HImode)
8532 emit_move_insn (operands[1], gen_lowpart (mode, rval));
8533
8534 x = gen_rtx_REG (CCmode, CC_REGNUM);
8535 x = gen_rtx_EQ (SImode, x, const0_rtx);
8536 emit_insn (gen_rtx_SET (VOIDmode, bval, x));
8537 }
8538
8539 /* Split a compare and swap pattern. */
8540
8541 void
8542 aarch64_split_compare_and_swap (rtx operands[])
8543 {
8544 rtx rval, mem, oldval, newval, scratch;
8545 enum machine_mode mode;
8546 bool is_weak;
8547 rtx_code_label *label1, *label2;
8548 rtx x, cond;
8549
8550 rval = operands[0];
8551 mem = operands[1];
8552 oldval = operands[2];
8553 newval = operands[3];
8554 is_weak = (operands[4] != const0_rtx);
8555 scratch = operands[7];
8556 mode = GET_MODE (mem);
8557
8558 label1 = NULL;
8559 if (!is_weak)
8560 {
8561 label1 = gen_label_rtx ();
8562 emit_label (label1);
8563 }
8564 label2 = gen_label_rtx ();
8565
8566 aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
8567
8568 cond = aarch64_gen_compare_reg (NE, rval, oldval);
8569 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
8570 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
8571 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
8572 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
8573
8574 aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
8575
8576 if (!is_weak)
8577 {
8578 x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
8579 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
8580 gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
8581 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
8582 }
8583 else
8584 {
8585 cond = gen_rtx_REG (CCmode, CC_REGNUM);
8586 x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
8587 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
8588 }
8589
8590 emit_label (label2);
8591 }
8592
8593 /* Split an atomic operation. */
8594
8595 void
8596 aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
8597 rtx value, rtx model_rtx, rtx cond)
8598 {
8599 enum machine_mode mode = GET_MODE (mem);
8600 enum machine_mode wmode = (mode == DImode ? DImode : SImode);
8601 rtx_code_label *label;
8602 rtx x;
8603
8604 label = gen_label_rtx ();
8605 emit_label (label);
8606
8607 if (new_out)
8608 new_out = gen_lowpart (wmode, new_out);
8609 if (old_out)
8610 old_out = gen_lowpart (wmode, old_out);
8611 else
8612 old_out = new_out;
8613 value = simplify_gen_subreg (wmode, value, mode, 0);
8614
8615 aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
8616
8617 switch (code)
8618 {
8619 case SET:
8620 new_out = value;
8621 break;
8622
8623 case NOT:
8624 x = gen_rtx_AND (wmode, old_out, value);
8625 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
8626 x = gen_rtx_NOT (wmode, new_out);
8627 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
8628 break;
8629
8630 case MINUS:
8631 if (CONST_INT_P (value))
8632 {
8633 value = GEN_INT (-INTVAL (value));
8634 code = PLUS;
8635 }
8636 /* Fall through. */
8637
8638 default:
8639 x = gen_rtx_fmt_ee (code, wmode, old_out, value);
8640 emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
8641 break;
8642 }
8643
8644 aarch64_emit_store_exclusive (mode, cond, mem,
8645 gen_lowpart (mode, new_out), model_rtx);
8646
8647 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
8648 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
8649 gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
8650 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
8651 }
8652
8653 static void
8654 aarch64_print_extension (void)
8655 {
8656 const struct aarch64_option_extension *opt = NULL;
8657
8658 for (opt = all_extensions; opt->name != NULL; opt++)
8659 if ((aarch64_isa_flags & opt->flags_on) == opt->flags_on)
8660 asm_fprintf (asm_out_file, "+%s", opt->name);
8661
8662 asm_fprintf (asm_out_file, "\n");
8663 }
8664
8665 static void
8666 aarch64_start_file (void)
8667 {
8668 if (selected_arch)
8669 {
8670 asm_fprintf (asm_out_file, "\t.arch %s", selected_arch->name);
8671 aarch64_print_extension ();
8672 }
8673 else if (selected_cpu)
8674 {
8675 const char *truncated_name
8676 = aarch64_rewrite_selected_cpu (selected_cpu->name);
8677 asm_fprintf (asm_out_file, "\t.cpu %s", truncated_name);
8678 aarch64_print_extension ();
8679 }
8680 default_file_start ();
8681 }
8682
8683 /* Target hook for c_mode_for_suffix. */
8684 static enum machine_mode
8685 aarch64_c_mode_for_suffix (char suffix)
8686 {
8687 if (suffix == 'q')
8688 return TFmode;
8689
8690 return VOIDmode;
8691 }
8692
8693 /* We can only represent floating point constants which will fit in
8694 "quarter-precision" values. These values are characterised by
8695 a sign bit, a 4-bit mantissa and a 3-bit exponent. And are given
8696 by:
8697
8698 (-1)^s * (n/16) * 2^r
8699
8700 Where:
8701 's' is the sign bit.
8702 'n' is an integer in the range 16 <= n <= 31.
8703 'r' is an integer in the range -3 <= r <= 4. */
8704
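/* For example, 1.0 is encoded as s = 0, n = 16, r = 0, and 2.5 as
   s = 0, n = 20, r = 1 (20/16 * 2 == 2.5).  The representable magnitudes
   therefore range from 16/16 * 2^-3 == 0.125 up to 31/16 * 2^4 == 31.0.  */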
8705 /* Return true iff X can be represented by a quarter-precision
8706 floating point immediate operand. Note, we cannot represent 0.0. */
8707 bool
8708 aarch64_float_const_representable_p (rtx x)
8709 {
8710 /* This represents our current view of how many bits
8711 make up the mantissa. */
8712 int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
8713 int exponent;
8714 unsigned HOST_WIDE_INT mantissa, mask;
8715 REAL_VALUE_TYPE r, m;
8716 bool fail;
8717
8718 if (!CONST_DOUBLE_P (x))
8719 return false;
8720
8721 if (GET_MODE (x) == VOIDmode)
8722 return false;
8723
8724 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8725
8726 /* We cannot represent infinities, NaNs or +/-zero. We won't
8727 know if we have +zero until we analyse the mantissa, but we
8728 can reject the other invalid values. */
8729 if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r)
8730 || REAL_VALUE_MINUS_ZERO (r))
8731 return false;
8732
8733 /* Extract exponent. */
8734 r = real_value_abs (&r);
8735 exponent = REAL_EXP (&r);
8736
8737 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
8738 highest (sign) bit, with a fixed binary point at bit point_pos.
8739 m1 holds the low part of the mantissa, m2 the high part.
8740 WARNING: If we ever have a representation using more than 2 * H_W_I - 1
8741 bits for the mantissa, this can fail (low bits will be lost). */
8742 real_ldexp (&m, &r, point_pos - exponent);
8743 wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
8744
8745 /* If the low part of the mantissa has bits set we cannot represent
8746 the value. */
8747 if (w.elt (0) != 0)
8748 return false;
8749 /* We have rejected the lower HOST_WIDE_INT, so update our
8750 understanding of how many bits lie in the mantissa and
8751 look only at the high HOST_WIDE_INT. */
8752 mantissa = w.elt (1);
8753 point_pos -= HOST_BITS_PER_WIDE_INT;
8754
8755 /* We can only represent values with a mantissa of the form 1.xxxx. */
8756 mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
8757 if ((mantissa & mask) != 0)
8758 return false;
8759
8760 /* Having filtered unrepresentable values, we may now remove all
8761 but the highest 5 bits. */
8762 mantissa >>= point_pos - 5;
8763
8764 /* We cannot represent the value 0.0, so reject it. This is handled
8765 elsewhere. */
8766 if (mantissa == 0)
8767 return false;
8768
8769 /* Then, as bit 4 is always set, we can mask it off, leaving
8770 the mantissa in the range [0, 15]. */
8771 mantissa &= ~(1 << 4);
8772 gcc_assert (mantissa <= 15);
8773
8774 /* GCC internally does not use IEEE754-like encoding (where normalized
8775 significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
8776 Our mantissa values are shifted 4 places to the left relative to
8777 normalized IEEE754 so we must modify the exponent returned by REAL_EXP
8778 by 5 places to correct for GCC's representation. */
8779 exponent = 5 - exponent;
8780
8781 return (exponent >= 0 && exponent <= 7);
8782 }
8783
8784 char*
8785 aarch64_output_simd_mov_immediate (rtx const_vector,
8786 enum machine_mode mode,
8787 unsigned width)
8788 {
8789 bool is_valid;
8790 static char templ[40];
8791 const char *mnemonic;
8792 const char *shift_op;
8793 unsigned int lane_count = 0;
8794 char element_char;
8795
8796 struct simd_immediate_info info = { NULL_RTX, 0, 0, false, false };
8797
8798 /* This will return true to show const_vector is legal for use as either
8799 an AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate. It will
8800 also update INFO to show how the immediate should be generated. */
8801 is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info);
8802 gcc_assert (is_valid);
8803
8804 element_char = sizetochar (info.element_width);
8805 lane_count = width / info.element_width;
8806
8807 mode = GET_MODE_INNER (mode);
8808 if (mode == SFmode || mode == DFmode)
8809 {
8810 gcc_assert (info.shift == 0 && ! info.mvn);
8811 if (aarch64_float_const_zero_rtx_p (info.value))
8812 info.value = GEN_INT (0);
8813 else
8814 {
8815 #define buf_size 20
8816 REAL_VALUE_TYPE r;
8817 REAL_VALUE_FROM_CONST_DOUBLE (r, info.value);
8818 char float_buf[buf_size] = {'\0'};
8819 real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode);
8820 #undef buf_size
8821
8822 if (lane_count == 1)
8823 snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
8824 else
8825 snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
8826 lane_count, element_char, float_buf);
8827 return templ;
8828 }
8829 }
8830
8831 mnemonic = info.mvn ? "mvni" : "movi";
8832 shift_op = info.msl ? "msl" : "lsl";
8833
8834 if (lane_count == 1)
8835 snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
8836 mnemonic, UINTVAL (info.value));
8837 else if (info.shift)
8838 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
8839 ", %s %d", mnemonic, lane_count, element_char,
8840 UINTVAL (info.value), shift_op, info.shift);
8841 else
8842 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
8843 mnemonic, lane_count, element_char, UINTVAL (info.value));
8844 return templ;
8845 }
8846
8847 char*
8848 aarch64_output_scalar_simd_mov_immediate (rtx immediate,
8849 enum machine_mode mode)
8850 {
8851 enum machine_mode vmode;
8852
8853 gcc_assert (!VECTOR_MODE_P (mode));
8854 vmode = aarch64_simd_container_mode (mode, 64);
8855 rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
8856 return aarch64_output_simd_mov_immediate (v_op, vmode, 64);
8857 }
8858
8859 /* Split operands into moves from op[1] + op[2] into op[0]. */
8860
8861 void
8862 aarch64_split_combinev16qi (rtx operands[3])
8863 {
8864 unsigned int dest = REGNO (operands[0]);
8865 unsigned int src1 = REGNO (operands[1]);
8866 unsigned int src2 = REGNO (operands[2]);
8867 enum machine_mode halfmode = GET_MODE (operands[1]);
8868 unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode);
8869 rtx destlo, desthi;
8870
8871 gcc_assert (halfmode == V16QImode);
8872
8873 if (src1 == dest && src2 == dest + halfregs)
8874 {
8875 /* No-op move. Can't split to nothing; emit something. */
8876 emit_note (NOTE_INSN_DELETED);
8877 return;
8878 }
8879
8880 /* Preserve register attributes for variable tracking. */
8881 destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
8882 desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
8883 GET_MODE_SIZE (halfmode));
8884
8885 /* Special case of reversed high/low parts. */
8886 if (reg_overlap_mentioned_p (operands[2], destlo)
8887 && reg_overlap_mentioned_p (operands[1], desthi))
8888 {
8889 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
8890 emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2]));
8891 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
8892 }
8893 else if (!reg_overlap_mentioned_p (operands[2], destlo))
8894 {
8895 /* Try to avoid unnecessary moves if part of the result
8896 is in the right place already. */
8897 if (src1 != dest)
8898 emit_move_insn (destlo, operands[1]);
8899 if (src2 != dest + halfregs)
8900 emit_move_insn (desthi, operands[2]);
8901 }
8902 else
8903 {
8904 if (src2 != dest + halfregs)
8905 emit_move_insn (desthi, operands[2]);
8906 if (src1 != dest)
8907 emit_move_insn (destlo, operands[1]);
8908 }
8909 }
8910
8911 /* vec_perm support. */
8912
8913 #define MAX_VECT_LEN 16
8914
8915 struct expand_vec_perm_d
8916 {
8917 rtx target, op0, op1;
8918 unsigned char perm[MAX_VECT_LEN];
8919 enum machine_mode vmode;
8920 unsigned char nelt;
8921 bool one_vector_p;
8922 bool testing_p;
8923 };
8924
8925 /* Generate a variable permutation. */
8926
8927 static void
8928 aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
8929 {
8930 enum machine_mode vmode = GET_MODE (target);
8931 bool one_vector_p = rtx_equal_p (op0, op1);
8932
8933 gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
8934 gcc_checking_assert (GET_MODE (op0) == vmode);
8935 gcc_checking_assert (GET_MODE (op1) == vmode);
8936 gcc_checking_assert (GET_MODE (sel) == vmode);
8937 gcc_checking_assert (TARGET_SIMD);
8938
8939 if (one_vector_p)
8940 {
8941 if (vmode == V8QImode)
8942 {
8943 /* Expand the argument to a V16QI mode by duplicating it. */
8944 rtx pair = gen_reg_rtx (V16QImode);
8945 emit_insn (gen_aarch64_combinev8qi (pair, op0, op0));
8946 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
8947 }
8948 else
8949 {
8950 emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel));
8951 }
8952 }
8953 else
8954 {
8955 rtx pair;
8956
8957 if (vmode == V8QImode)
8958 {
8959 pair = gen_reg_rtx (V16QImode);
8960 emit_insn (gen_aarch64_combinev8qi (pair, op0, op1));
8961 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
8962 }
8963 else
8964 {
8965 pair = gen_reg_rtx (OImode);
8966 emit_insn (gen_aarch64_combinev16qi (pair, op0, op1));
8967 emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel));
8968 }
8969 }
8970 }
8971
8972 void
8973 aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
8974 {
8975 enum machine_mode vmode = GET_MODE (target);
8976 unsigned int nelt = GET_MODE_NUNITS (vmode);
8977 bool one_vector_p = rtx_equal_p (op0, op1);
8978 rtx mask;
8979
8980 /* The TBL instruction does not use a modulo index, so we must take care
8981 of that ourselves. */
8982 mask = aarch64_simd_gen_const_vector_dup (vmode,
8983 one_vector_p ? nelt - 1 : 2 * nelt - 1);
8984 sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
8985
8986 /* For big-endian, we also need to reverse the index within the vector
8987 (but not which vector). */
8988 if (BYTES_BIG_ENDIAN)
8989 {
8990 /* If one_vector_p, mask is a vector of (nelt - 1)'s already. */
8991 if (!one_vector_p)
8992 mask = aarch64_simd_gen_const_vector_dup (vmode, nelt - 1);
8993 sel = expand_simple_binop (vmode, XOR, sel, mask,
8994 NULL, 0, OPTAB_LIB_WIDEN);
8995 }
8996 aarch64_expand_vec_perm_1 (target, op0, op1, sel);
8997 }
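/* E.g. for a two-vector V16QImode permute each selector byte is ANDed
   with 31 (2 * nelt - 1), and with 15 for a single-vector permute, so
   the modulo behaviour expected by vec_perm is preserved (TBL itself
   returns zero for out-of-range indices).  */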
8998
8999 /* Recognize patterns suitable for the TRN instructions. */
9000 static bool
9001 aarch64_evpc_trn (struct expand_vec_perm_d *d)
9002 {
9003 unsigned int i, odd, mask, nelt = d->nelt;
9004 rtx out, in0, in1, x;
9005 rtx (*gen) (rtx, rtx, rtx);
9006 enum machine_mode vmode = d->vmode;
9007
9008 if (GET_MODE_UNIT_SIZE (vmode) > 8)
9009 return false;
9010
9011 /* Note that these are little-endian tests.
9012 We correct for big-endian later. */
9013 if (d->perm[0] == 0)
9014 odd = 0;
9015 else if (d->perm[0] == 1)
9016 odd = 1;
9017 else
9018 return false;
9019 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
9020
9021 for (i = 0; i < nelt; i += 2)
9022 {
9023 if (d->perm[i] != i + odd)
9024 return false;
9025 if (d->perm[i + 1] != ((i + nelt + odd) & mask))
9026 return false;
9027 }
9028
9029 /* Success! */
9030 if (d->testing_p)
9031 return true;
9032
9033 in0 = d->op0;
9034 in1 = d->op1;
9035 if (BYTES_BIG_ENDIAN)
9036 {
9037 x = in0, in0 = in1, in1 = x;
9038 odd = !odd;
9039 }
9040 out = d->target;
9041
9042 if (odd)
9043 {
9044 switch (vmode)
9045 {
9046 case V16QImode: gen = gen_aarch64_trn2v16qi; break;
9047 case V8QImode: gen = gen_aarch64_trn2v8qi; break;
9048 case V8HImode: gen = gen_aarch64_trn2v8hi; break;
9049 case V4HImode: gen = gen_aarch64_trn2v4hi; break;
9050 case V4SImode: gen = gen_aarch64_trn2v4si; break;
9051 case V2SImode: gen = gen_aarch64_trn2v2si; break;
9052 case V2DImode: gen = gen_aarch64_trn2v2di; break;
9053 case V4SFmode: gen = gen_aarch64_trn2v4sf; break;
9054 case V2SFmode: gen = gen_aarch64_trn2v2sf; break;
9055 case V2DFmode: gen = gen_aarch64_trn2v2df; break;
9056 default:
9057 return false;
9058 }
9059 }
9060 else
9061 {
9062 switch (vmode)
9063 {
9064 case V16QImode: gen = gen_aarch64_trn1v16qi; break;
9065 case V8QImode: gen = gen_aarch64_trn1v8qi; break;
9066 case V8HImode: gen = gen_aarch64_trn1v8hi; break;
9067 case V4HImode: gen = gen_aarch64_trn1v4hi; break;
9068 case V4SImode: gen = gen_aarch64_trn1v4si; break;
9069 case V2SImode: gen = gen_aarch64_trn1v2si; break;
9070 case V2DImode: gen = gen_aarch64_trn1v2di; break;
9071 case V4SFmode: gen = gen_aarch64_trn1v4sf; break;
9072 case V2SFmode: gen = gen_aarch64_trn1v2sf; break;
9073 case V2DFmode: gen = gen_aarch64_trn1v2df; break;
9074 default:
9075 return false;
9076 }
9077 }
9078
9079 emit_insn (gen (out, in0, in1));
9080 return true;
9081 }
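/* For V4SImode the permutation { 0, 4, 2, 6 } is matched here with
   odd == 0 and { 1, 5, 3, 7 } with odd == 1, producing TRN1 and TRN2
   respectively on a little-endian target.  */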
9082
9083 /* Recognize patterns suitable for the UZP instructions. */
9084 static bool
9085 aarch64_evpc_uzp (struct expand_vec_perm_d *d)
9086 {
9087 unsigned int i, odd, mask, nelt = d->nelt;
9088 rtx out, in0, in1, x;
9089 rtx (*gen) (rtx, rtx, rtx);
9090 enum machine_mode vmode = d->vmode;
9091
9092 if (GET_MODE_UNIT_SIZE (vmode) > 8)
9093 return false;
9094
9095 /* Note that these are little-endian tests.
9096 We correct for big-endian later. */
9097 if (d->perm[0] == 0)
9098 odd = 0;
9099 else if (d->perm[0] == 1)
9100 odd = 1;
9101 else
9102 return false;
9103 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
9104
9105 for (i = 0; i < nelt; i++)
9106 {
9107 unsigned elt = (i * 2 + odd) & mask;
9108 if (d->perm[i] != elt)
9109 return false;
9110 }
9111
9112 /* Success! */
9113 if (d->testing_p)
9114 return true;
9115
9116 in0 = d->op0;
9117 in1 = d->op1;
9118 if (BYTES_BIG_ENDIAN)
9119 {
9120 x = in0, in0 = in1, in1 = x;
9121 odd = !odd;
9122 }
9123 out = d->target;
9124
9125 if (odd)
9126 {
9127 switch (vmode)
9128 {
9129 case V16QImode: gen = gen_aarch64_uzp2v16qi; break;
9130 case V8QImode: gen = gen_aarch64_uzp2v8qi; break;
9131 case V8HImode: gen = gen_aarch64_uzp2v8hi; break;
9132 case V4HImode: gen = gen_aarch64_uzp2v4hi; break;
9133 case V4SImode: gen = gen_aarch64_uzp2v4si; break;
9134 case V2SImode: gen = gen_aarch64_uzp2v2si; break;
9135 case V2DImode: gen = gen_aarch64_uzp2v2di; break;
9136 case V4SFmode: gen = gen_aarch64_uzp2v4sf; break;
9137 case V2SFmode: gen = gen_aarch64_uzp2v2sf; break;
9138 case V2DFmode: gen = gen_aarch64_uzp2v2df; break;
9139 default:
9140 return false;
9141 }
9142 }
9143 else
9144 {
9145 switch (vmode)
9146 {
9147 case V16QImode: gen = gen_aarch64_uzp1v16qi; break;
9148 case V8QImode: gen = gen_aarch64_uzp1v8qi; break;
9149 case V8HImode: gen = gen_aarch64_uzp1v8hi; break;
9150 case V4HImode: gen = gen_aarch64_uzp1v4hi; break;
9151 case V4SImode: gen = gen_aarch64_uzp1v4si; break;
9152 case V2SImode: gen = gen_aarch64_uzp1v2si; break;
9153 case V2DImode: gen = gen_aarch64_uzp1v2di; break;
9154 case V4SFmode: gen = gen_aarch64_uzp1v4sf; break;
9155 case V2SFmode: gen = gen_aarch64_uzp1v2sf; break;
9156 case V2DFmode: gen = gen_aarch64_uzp1v2df; break;
9157 default:
9158 return false;
9159 }
9160 }
9161
9162 emit_insn (gen (out, in0, in1));
9163 return true;
9164 }
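/* For V4SImode the permutation { 0, 2, 4, 6 } is matched here with
   odd == 0 and { 1, 3, 5, 7 } with odd == 1, producing UZP1 and UZP2
   respectively on a little-endian target.  */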
9165
9166 /* Recognize patterns suitable for the ZIP instructions. */
9167 static bool
9168 aarch64_evpc_zip (struct expand_vec_perm_d *d)
9169 {
9170 unsigned int i, high, mask, nelt = d->nelt;
9171 rtx out, in0, in1, x;
9172 rtx (*gen) (rtx, rtx, rtx);
9173 enum machine_mode vmode = d->vmode;
9174
9175 if (GET_MODE_UNIT_SIZE (vmode) > 8)
9176 return false;
9177
9178 /* Note that these are little-endian tests.
9179 We correct for big-endian later. */
9180 high = nelt / 2;
9181 if (d->perm[0] == high)
9182 /* Do Nothing. */
9183 ;
9184 else if (d->perm[0] == 0)
9185 high = 0;
9186 else
9187 return false;
9188 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
9189
9190 for (i = 0; i < nelt / 2; i++)
9191 {
9192 unsigned elt = (i + high) & mask;
9193 if (d->perm[i * 2] != elt)
9194 return false;
9195 elt = (elt + nelt) & mask;
9196 if (d->perm[i * 2 + 1] != elt)
9197 return false;
9198 }
9199
9200 /* Success! */
9201 if (d->testing_p)
9202 return true;
9203
9204 in0 = d->op0;
9205 in1 = d->op1;
9206 if (BYTES_BIG_ENDIAN)
9207 {
9208 x = in0, in0 = in1, in1 = x;
9209 high = !high;
9210 }
9211 out = d->target;
9212
9213 if (high)
9214 {
9215 switch (vmode)
9216 {
9217 case V16QImode: gen = gen_aarch64_zip2v16qi; break;
9218 case V8QImode: gen = gen_aarch64_zip2v8qi; break;
9219 case V8HImode: gen = gen_aarch64_zip2v8hi; break;
9220 case V4HImode: gen = gen_aarch64_zip2v4hi; break;
9221 case V4SImode: gen = gen_aarch64_zip2v4si; break;
9222 case V2SImode: gen = gen_aarch64_zip2v2si; break;
9223 case V2DImode: gen = gen_aarch64_zip2v2di; break;
9224 case V4SFmode: gen = gen_aarch64_zip2v4sf; break;
9225 case V2SFmode: gen = gen_aarch64_zip2v2sf; break;
9226 case V2DFmode: gen = gen_aarch64_zip2v2df; break;
9227 default:
9228 return false;
9229 }
9230 }
9231 else
9232 {
9233 switch (vmode)
9234 {
9235 case V16QImode: gen = gen_aarch64_zip1v16qi; break;
9236 case V8QImode: gen = gen_aarch64_zip1v8qi; break;
9237 case V8HImode: gen = gen_aarch64_zip1v8hi; break;
9238 case V4HImode: gen = gen_aarch64_zip1v4hi; break;
9239 case V4SImode: gen = gen_aarch64_zip1v4si; break;
9240 case V2SImode: gen = gen_aarch64_zip1v2si; break;
9241 case V2DImode: gen = gen_aarch64_zip1v2di; break;
9242 case V4SFmode: gen = gen_aarch64_zip1v4sf; break;
9243 case V2SFmode: gen = gen_aarch64_zip1v2sf; break;
9244 case V2DFmode: gen = gen_aarch64_zip1v2df; break;
9245 default:
9246 return false;
9247 }
9248 }
9249
9250 emit_insn (gen (out, in0, in1));
9251 return true;
9252 }
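/* For V4SImode the permutation { 0, 4, 1, 5 } is matched here with
   high == 0 and { 2, 6, 3, 7 } with high == nelt / 2, producing ZIP1
   and ZIP2 respectively on a little-endian target.  */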
9253
9254 /* Recognize patterns for the EXT insn. */
9255
9256 static bool
9257 aarch64_evpc_ext (struct expand_vec_perm_d *d)
9258 {
9259 unsigned int i, nelt = d->nelt;
9260 rtx (*gen) (rtx, rtx, rtx, rtx);
9261 rtx offset;
9262
9263 unsigned int location = d->perm[0]; /* Always < nelt. */
9264
9265 /* Check if the extracted indices are increasing by one. */
9266 for (i = 1; i < nelt; i++)
9267 {
9268 unsigned int required = location + i;
9269 if (d->one_vector_p)
9270 {
9271 /* We'll pass the same vector in twice, so allow indices to wrap. */
9272 required &= (nelt - 1);
9273 }
9274 if (d->perm[i] != required)
9275 return false;
9276 }
9277
9278 switch (d->vmode)
9279 {
9280 case V16QImode: gen = gen_aarch64_extv16qi; break;
9281 case V8QImode: gen = gen_aarch64_extv8qi; break;
9282 case V4HImode: gen = gen_aarch64_extv4hi; break;
9283 case V8HImode: gen = gen_aarch64_extv8hi; break;
9284 case V2SImode: gen = gen_aarch64_extv2si; break;
9285 case V4SImode: gen = gen_aarch64_extv4si; break;
9286 case V2SFmode: gen = gen_aarch64_extv2sf; break;
9287 case V4SFmode: gen = gen_aarch64_extv4sf; break;
9288 case V2DImode: gen = gen_aarch64_extv2di; break;
9289 case V2DFmode: gen = gen_aarch64_extv2df; break;
9290 default:
9291 return false;
9292 }
9293
9294 /* Success! */
9295 if (d->testing_p)
9296 return true;
9297
9298 /* The case where (location == 0) is a no-op for both big- and little-endian,
9299 and is removed by the mid-end at optimization levels -O1 and higher. */
9300
9301 if (BYTES_BIG_ENDIAN && (location != 0))
9302 {
9303 /* After setup, we want the high elements of the first vector (stored
9304 at the LSB end of the register), and the low elements of the second
9305 vector (stored at the MSB end of the register). So swap. */
9306 rtx temp = d->op0;
9307 d->op0 = d->op1;
9308 d->op1 = temp;
9309 /* location != 0 (above), so safe to assume (nelt - location) < nelt. */
9310 location = nelt - location;
9311 }
9312
9313 offset = GEN_INT (location);
9314 emit_insn (gen (d->target, d->op0, d->op1, offset));
9315 return true;
9316 }
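/* For V4SImode the two-vector permutation { 1, 2, 3, 4 } is matched
   with location == 1: elements 1-3 of the first vector followed by
   element 0 of the second.  With a single input, { 3, 0, 1, 2 } also
   matches because the indices are allowed to wrap, giving a rotate of
   the vector.  */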
9317
9318 /* Recognize patterns for the REV insns. */
9319
9320 static bool
9321 aarch64_evpc_rev (struct expand_vec_perm_d *d)
9322 {
9323 unsigned int i, j, diff, nelt = d->nelt;
9324 rtx (*gen) (rtx, rtx);
9325
9326 if (!d->one_vector_p)
9327 return false;
9328
9329 diff = d->perm[0];
9330 switch (diff)
9331 {
9332 case 7:
9333 switch (d->vmode)
9334 {
9335 case V16QImode: gen = gen_aarch64_rev64v16qi; break;
9336 case V8QImode: gen = gen_aarch64_rev64v8qi; break;
9337 default:
9338 return false;
9339 }
9340 break;
9341 case 3:
9342 switch (d->vmode)
9343 {
9344 case V16QImode: gen = gen_aarch64_rev32v16qi; break;
9345 case V8QImode: gen = gen_aarch64_rev32v8qi; break;
9346 case V8HImode: gen = gen_aarch64_rev64v8hi; break;
9347 case V4HImode: gen = gen_aarch64_rev64v4hi; break;
9348 default:
9349 return false;
9350 }
9351 break;
9352 case 1:
9353 switch (d->vmode)
9354 {
9355 case V16QImode: gen = gen_aarch64_rev16v16qi; break;
9356 case V8QImode: gen = gen_aarch64_rev16v8qi; break;
9357 case V8HImode: gen = gen_aarch64_rev32v8hi; break;
9358 case V4HImode: gen = gen_aarch64_rev32v4hi; break;
9359 case V4SImode: gen = gen_aarch64_rev64v4si; break;
9360 case V2SImode: gen = gen_aarch64_rev64v2si; break;
9361 case V4SFmode: gen = gen_aarch64_rev64v4sf; break;
9362 case V2SFmode: gen = gen_aarch64_rev64v2sf; break;
9363 default:
9364 return false;
9365 }
9366 break;
9367 default:
9368 return false;
9369 }
9370
9371 for (i = 0; i < nelt; i += diff + 1)
9372 for (j = 0; j <= diff; j += 1)
9373 {
9374 /* This is guaranteed to be true as the value of diff
9375 is 7, 3 or 1 and we should have enough elements in the
9376 queue to generate this. Getting a vector mask with a
9377 value of diff other than these values implies that
9378 something is wrong by the time we get here. */
9379 gcc_assert (i + j < nelt);
9380 if (d->perm[i + j] != i + diff - j)
9381 return false;
9382 }
9383
9384 /* Success! */
9385 if (d->testing_p)
9386 return true;
9387
9388 emit_insn (gen (d->target, d->op0));
9389 return true;
9390 }
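/* The value of diff identifies the REV variant: e.g. for V8HImode a
   diff of 3 corresponds to the permutation { 3, 2, 1, 0, 7, 6, 5, 4 },
   i.e. REV64 reversing the 16-bit elements within each 64-bit chunk.  */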
9391
9392 static bool
9393 aarch64_evpc_dup (struct expand_vec_perm_d *d)
9394 {
9395 rtx (*gen) (rtx, rtx, rtx);
9396 rtx out = d->target;
9397 rtx in0;
9398 enum machine_mode vmode = d->vmode;
9399 unsigned int i, elt, nelt = d->nelt;
9400 rtx lane;
9401
9402 elt = d->perm[0];
9403 for (i = 1; i < nelt; i++)
9404 {
9405 if (elt != d->perm[i])
9406 return false;
9407 }
9408
9409 /* The generic preparation in aarch64_expand_vec_perm_const_1
9410 swaps the operand order and the permute indices if it finds
9411 d->perm[0] to be in the second operand. Thus, we can always
9412 use d->op0 and need not do any extra arithmetic to get the
9413 correct lane number. */
9414 in0 = d->op0;
9415 lane = GEN_INT (elt); /* The pattern corrects for big-endian. */
9416
9417 switch (vmode)
9418 {
9419 case V16QImode: gen = gen_aarch64_dup_lanev16qi; break;
9420 case V8QImode: gen = gen_aarch64_dup_lanev8qi; break;
9421 case V8HImode: gen = gen_aarch64_dup_lanev8hi; break;
9422 case V4HImode: gen = gen_aarch64_dup_lanev4hi; break;
9423 case V4SImode: gen = gen_aarch64_dup_lanev4si; break;
9424 case V2SImode: gen = gen_aarch64_dup_lanev2si; break;
9425 case V2DImode: gen = gen_aarch64_dup_lanev2di; break;
9426 case V4SFmode: gen = gen_aarch64_dup_lanev4sf; break;
9427 case V2SFmode: gen = gen_aarch64_dup_lanev2sf; break;
9428 case V2DFmode: gen = gen_aarch64_dup_lanev2df; break;
9429 default:
9430 return false;
9431 }
9432
9433 emit_insn (gen (out, in0, lane));
9434 return true;
9435 }
9436
9437 static bool
9438 aarch64_evpc_tbl (struct expand_vec_perm_d *d)
9439 {
9440 rtx rperm[MAX_VECT_LEN], sel;
9441 enum machine_mode vmode = d->vmode;
9442 unsigned int i, nelt = d->nelt;
9443
9444 if (d->testing_p)
9445 return true;
9446
9447 /* Generic code will try constant permutation twice. Once with the
9448 original mode and again with the elements lowered to QImode.
9449 So wait and don't do the selector expansion ourselves. */
9450 if (vmode != V8QImode && vmode != V16QImode)
9451 return false;
9452
9453 for (i = 0; i < nelt; ++i)
9454 {
9455 int nunits = GET_MODE_NUNITS (vmode);
9456
9457 /* If big-endian and two vectors we end up with a weird mixed-endian
9458 mode on NEON. Reverse the index within each word but not the word
9459 itself. */
9460 rperm[i] = GEN_INT (BYTES_BIG_ENDIAN ? d->perm[i] ^ (nunits - 1)
9461 : d->perm[i]);
9462 }
9463 sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
9464 sel = force_reg (vmode, sel);
9465
9466 aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
9467 return true;
9468 }
9469
9470 static bool
9471 aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
9472 {
9473 /* The pattern matching functions above are written to look for a small
9474 number to begin the sequence (0, 1, N/2). If we begin with an index
9475 from the second operand, we can swap the operands. */
9476 if (d->perm[0] >= d->nelt)
9477 {
9478 unsigned i, nelt = d->nelt;
9479 rtx x;
9480
9481 gcc_assert (nelt == (nelt & -nelt));
9482 for (i = 0; i < nelt; ++i)
9483 d->perm[i] ^= nelt; /* Keep the same index, but in the other vector. */
9484
9485 x = d->op0;
9486 d->op0 = d->op1;
9487 d->op1 = x;
9488 }
9489
9490 if (TARGET_SIMD)
9491 {
9492 if (aarch64_evpc_rev (d))
9493 return true;
9494 else if (aarch64_evpc_ext (d))
9495 return true;
9496 else if (aarch64_evpc_dup (d))
9497 return true;
9498 else if (aarch64_evpc_zip (d))
9499 return true;
9500 else if (aarch64_evpc_uzp (d))
9501 return true;
9502 else if (aarch64_evpc_trn (d))
9503 return true;
9504 return aarch64_evpc_tbl (d);
9505 }
9506 return false;
9507 }
9508
9509 /* Expand a vec_perm_const pattern. */
9510
9511 bool
9512 aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
9513 {
9514 struct expand_vec_perm_d d;
9515 int i, nelt, which;
9516
9517 d.target = target;
9518 d.op0 = op0;
9519 d.op1 = op1;
9520
9521 d.vmode = GET_MODE (target);
9522 gcc_assert (VECTOR_MODE_P (d.vmode));
9523 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
9524 d.testing_p = false;
9525
9526 for (i = which = 0; i < nelt; ++i)
9527 {
9528 rtx e = XVECEXP (sel, 0, i);
9529 int ei = INTVAL (e) & (2 * nelt - 1);
9530 which |= (ei < nelt ? 1 : 2);
9531 d.perm[i] = ei;
9532 }
9533
9534 switch (which)
9535 {
9536 default:
9537 gcc_unreachable ();
9538
9539 case 3:
9540 d.one_vector_p = false;
9541 if (!rtx_equal_p (op0, op1))
9542 break;
9543
9544 /* The elements of PERM do not suggest that only the first operand
9545 is used, but both operands are identical. Allow easier matching
9546 of the permutation by folding the permutation into the single
9547 input vector. */
9548 /* Fall Through. */
9549 case 2:
9550 for (i = 0; i < nelt; ++i)
9551 d.perm[i] &= nelt - 1;
9552 d.op0 = op1;
9553 d.one_vector_p = true;
9554 break;
9555
9556 case 1:
9557 d.op1 = op0;
9558 d.one_vector_p = true;
9559 break;
9560 }
9561
9562 return aarch64_expand_vec_perm_const_1 (&d);
9563 }
9564
9565 static bool
9566 aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
9567 const unsigned char *sel)
9568 {
9569 struct expand_vec_perm_d d;
9570 unsigned int i, nelt, which;
9571 bool ret;
9572
9573 d.vmode = vmode;
9574 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
9575 d.testing_p = true;
9576 memcpy (d.perm, sel, nelt);
9577
9578 /* Calculate whether all elements are in one vector. */
9579 for (i = which = 0; i < nelt; ++i)
9580 {
9581 unsigned char e = d.perm[i];
9582 gcc_assert (e < 2 * nelt);
9583 which |= (e < nelt ? 1 : 2);
9584 }
9585
9586 /* If all elements are from the second vector, reindex as if from the
9587 first vector. */
9588 if (which == 2)
9589 for (i = 0; i < nelt; ++i)
9590 d.perm[i] -= nelt;
9591
9592 /* Check whether the mask can be applied to a single vector. */
9593 d.one_vector_p = (which != 3);
9594
9595 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
9596 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
9597 if (!d.one_vector_p)
9598 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
9599
9600 start_sequence ();
9601 ret = aarch64_expand_vec_perm_const_1 (&d);
9602 end_sequence ();
9603
9604 return ret;
9605 }
9606
9607 /* Implement target hook CANNOT_CHANGE_MODE_CLASS. */
9608 bool
9609 aarch64_cannot_change_mode_class (enum machine_mode from,
9610 enum machine_mode to,
9611 enum reg_class rclass)
9612 {
9613 /* Full-reg subregs are allowed on general regs or any class if they are
9614 the same size. */
9615 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
9616 || !reg_classes_intersect_p (FP_REGS, rclass))
9617 return false;
9618
9619 /* Limited combinations of subregs are safe on FPREGs. Particularly,
9620 1. Vector Mode to Scalar mode where 1 unit of the vector is accessed.
9621 2. Scalar to Scalar for integer modes or same size float modes.
9622 3. Vector to Vector modes.
9623 4. On little-endian only, Vector-Structure to Vector modes. */
9624 if (GET_MODE_SIZE (from) > GET_MODE_SIZE (to))
9625 {
9626 if (aarch64_vector_mode_supported_p (from)
9627 && GET_MODE_SIZE (GET_MODE_INNER (from)) == GET_MODE_SIZE (to))
9628 return false;
9629
9630 if (GET_MODE_NUNITS (from) == 1
9631 && GET_MODE_NUNITS (to) == 1
9632 && (GET_MODE_CLASS (from) == MODE_INT
9633 || from == to))
9634 return false;
9635
9636 if (aarch64_vector_mode_supported_p (from)
9637 && aarch64_vector_mode_supported_p (to))
9638 return false;
9639
9640 /* Within a vector structure straddling multiple vector registers
9641 we are in a mixed-endian representation. As such, we can't
9642 easily change modes for BYTES_BIG_ENDIAN. Otherwise, we can
9643 switch between vectors and vector structures cheaply. */
9644 if (!BYTES_BIG_ENDIAN)
9645 if ((aarch64_vector_mode_supported_p (from)
9646 && aarch64_vect_struct_mode_p (to))
9647 || (aarch64_vector_mode_supported_p (to)
9648 && aarch64_vect_struct_mode_p (from)))
9649 return false;
9650 }
9651
9652 return true;
9653 }
9654
9655 /* Implement MODES_TIEABLE_P. */
9656
9657 bool
9658 aarch64_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9659 {
9660 if (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2))
9661 return true;
9662
9663 /* We specifically want to allow elements of "structure" modes to
9664 be tieable to the structure. This more general condition allows
9665 other rarer situations too. */
9666 if (TARGET_SIMD
9667 && aarch64_vector_mode_p (mode1)
9668 && aarch64_vector_mode_p (mode2))
9669 return true;
9670
9671 return false;
9672 }
9673
9674 /* Return a new RTX holding the result of moving POINTER forward by
9675 AMOUNT bytes. */
9676
9677 static rtx
9678 aarch64_move_pointer (rtx pointer, int amount)
9679 {
9680 rtx next = plus_constant (Pmode, XEXP (pointer, 0), amount);
9681
9682 return adjust_automodify_address (pointer, GET_MODE (pointer),
9683 next, amount);
9684 }
9685
9686 /* Return a new RTX holding the result of moving POINTER forward by the
9687 size of the mode it points to. */
9688
9689 static rtx
9690 aarch64_progress_pointer (rtx pointer)
9691 {
9692 HOST_WIDE_INT amount = GET_MODE_SIZE (GET_MODE (pointer));
9693
9694 return aarch64_move_pointer (pointer, amount);
9695 }
9696
9697 /* Copy one MODE sized block from SRC to DST, then progress SRC and DST by
9698 MODE bytes. */
9699
9700 static void
9701 aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst,
9702 enum machine_mode mode)
9703 {
9704 rtx reg = gen_reg_rtx (mode);
9705
9706 /* "Cast" the pointers to the correct mode. */
9707 *src = adjust_address (*src, mode, 0);
9708 *dst = adjust_address (*dst, mode, 0);
9709 /* Emit the memcpy. */
9710 emit_move_insn (reg, *src);
9711 emit_move_insn (*dst, reg);
9712 /* Move the pointers forward. */
9713 *src = aarch64_progress_pointer (*src);
9714 *dst = aarch64_progress_pointer (*dst);
9715 }
9716
9717 /* Expand movmem, as if from a __builtin_memcpy. Return true if
9718 we succeed, otherwise return false. */
9719
9720 bool
9721 aarch64_expand_movmem (rtx *operands)
9722 {
9723 unsigned int n;
9724 rtx dst = operands[0];
9725 rtx src = operands[1];
9726 rtx base;
9727 bool speed_p = !optimize_function_for_size_p (cfun);
9728
9729 /* When optimizing for size, give a better estimate of the length of a
9730 memcpy call, but use the default otherwise. */
9731 unsigned int max_instructions = (speed_p ? 15 : AARCH64_CALL_RATIO) / 2;
9732
9733 /* We can't do anything smart if the amount to copy is not constant. */
9734 if (!CONST_INT_P (operands[2]))
9735 return false;
9736
9737 n = UINTVAL (operands[2]);
9738
9739 /* Try to keep the number of instructions low. For cases below 16 bytes we
9740 need to make at most two moves. For cases above 16 bytes it will be one
9741 move for each 16 byte chunk, then at most two additional moves. */
9742 if (((n / 16) + (n % 16 ? 2 : 0)) > max_instructions)
9743 return false;
9744
9745 base = copy_to_mode_reg (Pmode, XEXP (dst, 0));
9746 dst = adjust_automodify_address (dst, VOIDmode, base, 0);
9747
9748 base = copy_to_mode_reg (Pmode, XEXP (src, 0));
9749 src = adjust_automodify_address (src, VOIDmode, base, 0);
9750
9751 /* Simple cases. Copy 0-3 bytes, as (if applicable) a 2-byte, then a
9752 1-byte chunk. */
9753 if (n < 4)
9754 {
9755 if (n >= 2)
9756 {
9757 aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
9758 n -= 2;
9759 }
9760
9761 if (n == 1)
9762 aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);
9763
9764 return true;
9765 }
9766
9767 /* Copy 4-7 bytes. First a 4-byte chunk, then (if applicable) a second
9768 4-byte chunk, partially overlapping with the previously copied chunk. */
9769 if (n < 8)
9770 {
9771 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
9772 n -= 4;
9773 if (n > 0)
9774 {
9775 int move = n - 4;
9776
9777 src = aarch64_move_pointer (src, move);
9778 dst = aarch64_move_pointer (dst, move);
9779 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
9780 }
9781 return true;
9782 }
9783
9784 /* Copy 8 or more bytes. Copy 16-byte chunks until fewer than 16 bytes
9785 remain, then (if applicable) one 8-byte chunk. */
9786 while (n >= 8)
9787 {
9788 if (n / 16)
9789 {
9790 aarch64_copy_one_block_and_progress_pointers (&src, &dst, TImode);
9791 n -= 16;
9792 }
9793 else
9794 {
9795 aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
9796 n -= 8;
9797 }
9798 }
9799
9800 /* Finish the final bytes of the copy. We can always do this with one more
9801 block move: either copy exactly the remaining amount, or copy a 4- or
9802 8-byte chunk that partially overlaps the bytes already copied. */
9803 if (n == 0)
9804 return true;
9805 else if (n == 1)
9806 aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);
9807 else if (n == 2)
9808 aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
9809 else if (n == 4)
9810 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
9811 else
9812 {
9813 if (n == 3)
9814 {
9815 src = aarch64_move_pointer (src, -1);
9816 dst = aarch64_move_pointer (dst, -1);
9817 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
9818 }
9819 else
9820 {
9821 int move = n - 8;
9822
9823 src = aarch64_move_pointer (src, move);
9824 dst = aarch64_move_pointer (dst, move);
9825 aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
9826 }
9827 }
9828
9829 return true;
9830 }
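
/* A worked example of the expansion above, assuming the length operand is
   the constant 15 (the budget check passes: 15 / 16 + 2 = 2 moves):

     - the while loop emits one DImode block move covering bytes 0-7,
       leaving n == 7;
     - the tail code takes the final else branch, steps both pointers back
       by one byte (move = 7 - 8 = -1) and emits a second DImode block move
       covering bytes 7-14, overlapping the byte already copied.

   A 15-byte copy therefore becomes two 8-byte load/store pairs instead of
   a library call.  */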
9831
9832 #undef TARGET_ADDRESS_COST
9833 #define TARGET_ADDRESS_COST aarch64_address_cost
9834
9835 /* This hook determines whether unnamed bitfields affect the alignment
9836 of the containing structure. The hook returns true if the structure
9837 should inherit the alignment requirements of an unnamed bitfield's
9838 type. */
9839 #undef TARGET_ALIGN_ANON_BITFIELD
9840 #define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
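
/* A hedged example of what returning true here means (the struct is
   hypothetical, not taken from the testsuite):

       struct s { char c; long long : 1; };

   the unnamed bit-field's declared type contributes its alignment, so
   struct s picks up the 8-byte alignment of long long rather than the
   1-byte alignment of its only named member, as the AAPCS64 bit-field
   rules require.  */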
9841
9842 #undef TARGET_ASM_ALIGNED_DI_OP
9843 #define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
9844
9845 #undef TARGET_ASM_ALIGNED_HI_OP
9846 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
9847
9848 #undef TARGET_ASM_ALIGNED_SI_OP
9849 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
9850
9851 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9852 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
9853 hook_bool_const_tree_hwi_hwi_const_tree_true
9854
9855 #undef TARGET_ASM_FILE_START
9856 #define TARGET_ASM_FILE_START aarch64_start_file
9857
9858 #undef TARGET_ASM_OUTPUT_MI_THUNK
9859 #define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
9860
9861 #undef TARGET_ASM_SELECT_RTX_SECTION
9862 #define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
9863
9864 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
9865 #define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
9866
9867 #undef TARGET_BUILD_BUILTIN_VA_LIST
9868 #define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
9869
9870 #undef TARGET_CALLEE_COPIES
9871 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
9872
9873 #undef TARGET_CAN_ELIMINATE
9874 #define TARGET_CAN_ELIMINATE aarch64_can_eliminate
9875
9876 #undef TARGET_CANNOT_FORCE_CONST_MEM
9877 #define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
9878
9879 #undef TARGET_CONDITIONAL_REGISTER_USAGE
9880 #define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
9881
9882 /* Only the least significant bit is used for initialization guard
9883 variables. */
9884 #undef TARGET_CXX_GUARD_MASK_BIT
9885 #define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
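
/* In concrete terms (a hedged sketch of the generic one-time-construction
   check, not the exact code GCC emits): with this hook returning true the
   inline guard test examines only bit 0, roughly

       if ((guard & 1) == 0)
         ... call __cxa_guard_acquire and run the constructor ...

   rather than comparing the guard's first byte against zero.  */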
9886
9887 #undef TARGET_C_MODE_FOR_SUFFIX
9888 #define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
9889
9890 #ifdef TARGET_BIG_ENDIAN_DEFAULT
9891 #undef TARGET_DEFAULT_TARGET_FLAGS
9892 #define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
9893 #endif
9894
9895 #undef TARGET_CLASS_MAX_NREGS
9896 #define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
9897
9898 #undef TARGET_BUILTIN_DECL
9899 #define TARGET_BUILTIN_DECL aarch64_builtin_decl
9900
9901 #undef TARGET_EXPAND_BUILTIN
9902 #define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
9903
9904 #undef TARGET_EXPAND_BUILTIN_VA_START
9905 #define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
9906
9907 #undef TARGET_FOLD_BUILTIN
9908 #define TARGET_FOLD_BUILTIN aarch64_fold_builtin
9909
9910 #undef TARGET_FUNCTION_ARG
9911 #define TARGET_FUNCTION_ARG aarch64_function_arg
9912
9913 #undef TARGET_FUNCTION_ARG_ADVANCE
9914 #define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
9915
9916 #undef TARGET_FUNCTION_ARG_BOUNDARY
9917 #define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
9918
9919 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9920 #define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
9921
9922 #undef TARGET_FUNCTION_VALUE
9923 #define TARGET_FUNCTION_VALUE aarch64_function_value
9924
9925 #undef TARGET_FUNCTION_VALUE_REGNO_P
9926 #define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
9927
9928 #undef TARGET_FRAME_POINTER_REQUIRED
9929 #define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
9930
9931 #undef TARGET_GIMPLE_FOLD_BUILTIN
9932 #define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
9933
9934 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9935 #define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
9936
9937 #undef TARGET_INIT_BUILTINS
9938 #define TARGET_INIT_BUILTINS aarch64_init_builtins
9939
9940 #undef TARGET_LEGITIMATE_ADDRESS_P
9941 #define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
9942
9943 #undef TARGET_LEGITIMATE_CONSTANT_P
9944 #define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
9945
9946 #undef TARGET_LIBGCC_CMP_RETURN_MODE
9947 #define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
9948
9949 #undef TARGET_LRA_P
9950 #define TARGET_LRA_P aarch64_lra_p
9951
9952 #undef TARGET_MANGLE_TYPE
9953 #define TARGET_MANGLE_TYPE aarch64_mangle_type
9954
9955 #undef TARGET_MEMORY_MOVE_COST
9956 #define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
9957
9958 #undef TARGET_MUST_PASS_IN_STACK
9959 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
9960
9961 /* This target hook should return true if accesses to volatile bitfields
9962 should use the narrowest mode possible. It should return false if these
9963 accesses should use the bitfield container type. */
9964 #undef TARGET_NARROW_VOLATILE_BITFIELD
9965 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
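
/* A hedged example of the difference (the type is hypothetical): given

       volatile struct { unsigned int flag : 1; } *p;

   reading p->flag with this hook returning false uses an access in the
   container type, i.e. a 32-bit load, rather than the narrowest mode that
   covers the bit-field (a byte load), which matches the AAPCS64 treatment
   of volatile bit-fields.  */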
9966
9967 #undef TARGET_OPTION_OVERRIDE
9968 #define TARGET_OPTION_OVERRIDE aarch64_override_options
9969
9970 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
9971 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
9972 aarch64_override_options_after_change
9973
9974 #undef TARGET_PASS_BY_REFERENCE
9975 #define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
9976
9977 #undef TARGET_PREFERRED_RELOAD_CLASS
9978 #define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
9979
9980 #undef TARGET_SECONDARY_RELOAD
9981 #define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
9982
9983 #undef TARGET_SHIFT_TRUNCATION_MASK
9984 #define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
9985
9986 #undef TARGET_SETUP_INCOMING_VARARGS
9987 #define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
9988
9989 #undef TARGET_STRUCT_VALUE_RTX
9990 #define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
9991
9992 #undef TARGET_REGISTER_MOVE_COST
9993 #define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
9994
9995 #undef TARGET_RETURN_IN_MEMORY
9996 #define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
9997
9998 #undef TARGET_RETURN_IN_MSB
9999 #define TARGET_RETURN_IN_MSB aarch64_return_in_msb
10000
10001 #undef TARGET_RTX_COSTS
10002 #define TARGET_RTX_COSTS aarch64_rtx_costs_wrapper
10003
10004 #undef TARGET_SCHED_ISSUE_RATE
10005 #define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate
10006
10007 #undef TARGET_TRAMPOLINE_INIT
10008 #define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
10009
10010 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
10011 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
10012
10013 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10014 #define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
10015
10016 #undef TARGET_ARRAY_MODE_SUPPORTED_P
10017 #define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
10018
10019 #undef TARGET_VECTORIZE_ADD_STMT_COST
10020 #define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost
10021
10022 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
10023 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
10024 aarch64_builtin_vectorization_cost
10025
10026 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
10027 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
10028
10029 #undef TARGET_VECTORIZE_BUILTINS
10030 #define TARGET_VECTORIZE_BUILTINS
10031
10032 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
10033 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
10034 aarch64_builtin_vectorized_function
10035
10036 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
10037 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
10038 aarch64_autovectorize_vector_sizes
10039
10040 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10041 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV \
10042 aarch64_atomic_assign_expand_fenv
10043
10044 /* Section anchor support. */
10045
10046 #undef TARGET_MIN_ANCHOR_OFFSET
10047 #define TARGET_MIN_ANCHOR_OFFSET -256
10048
10049 /* Limit the maximum anchor offset to 4k-1, since that's the limit for a
10050 byte offset; we can do much more for larger data types, but have no way
10051 to determine the size of the access. We assume accesses are aligned. */
10052 #undef TARGET_MAX_ANCHOR_OFFSET
10053 #define TARGET_MAX_ANCHOR_OFFSET 4095
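
/* A hedged reading of the two anchor limits above: -256 is the most
   negative 9-bit signed offset accepted by the unscaled load/store forms,
   e.g. "ldur x0, [x1, #-256]", while 4095 is the largest unsigned
   immediate offset for a byte access, e.g. "ldrb w0, [x1, #4095]".  Wider
   accesses could reach further because their 12-bit immediate is scaled by
   the access size, but the anchor code cannot assume a particular size.  */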
10054
10055 #undef TARGET_VECTOR_ALIGNMENT
10056 #define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment
10057
10058 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
10059 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
10060 aarch64_simd_vector_alignment_reachable
10061
10062 /* vec_perm support. */
10063
10064 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
10065 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
10066 aarch64_vectorize_vec_perm_const_ok
10067
10068
10069 #undef TARGET_FIXED_CONDITION_CODE_REGS
10070 #define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
10071
10072 #undef TARGET_FLAGS_REGNUM
10073 #define TARGET_FLAGS_REGNUM CC_REGNUM
10074
10075 #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
10076 #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
10077
10078 struct gcc_target targetm = TARGET_INITIALIZER;
10079
10080 #include "gt-aarch64.h"