/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2014 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "insn-codes.h"
#include "rtl.h"
#include "insn-attr.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "target-def.h"
#include "targhooks.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "hash-table.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "tree-vectorizer.h"
#include "config/arm/aarch-cost-tables.h"
/* Defined for convenience.  */
#define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)
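/* Worked example: with 64-bit pointers POINTER_SIZE is 64 and BITS_PER_UNIT
   is 8, so POINTER_BYTES is 8; under ILP32 (POINTER_SIZE of 32) it is 4.  */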
/* Classifies an address.

   ADDRESS_REG_IMM
       A simple base register plus immediate offset.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UXTW
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_SXTW
       A base register indexed by (optionally scaled) sign-extended register.

   ADDRESS_LO_SUM
       A LO_SUM rtx with a base register and "LO12" symbol relocation.

   ADDRESS_SYMBOLIC
       A constant symbolic address, in pc-relative literal pool.  */

enum aarch64_address_type
{
  ADDRESS_REG_IMM,
  ADDRESS_REG_WB,
  ADDRESS_REG_REG,
  ADDRESS_REG_UXTW,
  ADDRESS_REG_SXTW,
  ADDRESS_LO_SUM,
  ADDRESS_SYMBOLIC
};
struct aarch64_address_info
{
  enum aarch64_address_type type;
  rtx base;
  rtx offset;
  int shift;
  enum aarch64_symbol_type symbol_type;
};
struct simd_immediate_info
{
  rtx value;
  int shift;
  int element_width;
  bool mvn;
  bool msl;
};
/* The current code model.  */
enum aarch64_code_model aarch64_cmodel;
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS 1
#endif
static bool aarch64_lra_p (void);
static bool aarch64_composite_type_p (const_tree, enum machine_mode);
static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
                                                     const_tree,
                                                     enum machine_mode *, int *,
                                                     bool *);
static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void aarch64_override_options_after_change (void);
static bool aarch64_vector_mode_supported_p (enum machine_mode);
static unsigned bit_count (unsigned HOST_WIDE_INT);
static bool aarch64_const_vec_all_same_int_p (rtx,
                                              HOST_WIDE_INT, HOST_WIDE_INT);
static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                                 const unsigned char *sel);
static int aarch64_address_cost (rtx, enum machine_mode, addr_space_t, bool);
/* The processor for which instructions should be scheduled.  */
enum aarch64_processor aarch64_tune = cortexa53;

/* The current tuning set.  */
const struct tune_params *aarch64_tune_params;

/* Mask to specify which instructions we are allowed to generate.  */
unsigned long aarch64_isa_flags = 0;

/* Mask to specify which instruction scheduling options should be used.  */
unsigned long aarch64_tune_flags = 0;
/* Tuning parameters.  */

#if HAVE_DESIGNATED_INITIALIZERS
#define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
#else
#define NAMED_PARAM(NAME, VAL) (VAL)
#endif
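/* Illustrative expansion: with designated initializers available,
   NAMED_PARAM (memmov_cost, 4) becomes ".memmov_cost = (4)"; otherwise it
   becomes the bare "(4)" and relies on the positional order of the
   structure members.  */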
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif

#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct cpu_addrcost_table generic_addrcost_table =
{
#if HAVE_DESIGNATED_INITIALIZERS
  .addr_scale_costs =
#endif
    {
      NAMED_PARAM (hi, 0),
      NAMED_PARAM (si, 0),
      NAMED_PARAM (di, 0),
      NAMED_PARAM (ti, 0),
    },
  NAMED_PARAM (pre_modify, 0),
  NAMED_PARAM (post_modify, 0),
  NAMED_PARAM (register_offset, 0),
  NAMED_PARAM (register_extend, 0),
  NAMED_PARAM (imm_offset, 0)
};
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct cpu_addrcost_table cortexa57_addrcost_table =
{
#if HAVE_DESIGNATED_INITIALIZERS
  .addr_scale_costs =
#endif
    {
      NAMED_PARAM (hi, 1),
      NAMED_PARAM (si, 0),
      NAMED_PARAM (di, 0),
      NAMED_PARAM (ti, 1),
    },
  NAMED_PARAM (pre_modify, 0),
  NAMED_PARAM (post_modify, 0),
  NAMED_PARAM (register_offset, 0),
  NAMED_PARAM (register_extend, 0),
  NAMED_PARAM (imm_offset, 0),
};
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct cpu_regmove_cost generic_regmove_cost =
{
  NAMED_PARAM (GP2GP, 1),
  NAMED_PARAM (GP2FP, 2),
  NAMED_PARAM (FP2GP, 2),
  /* We currently do not provide direct support for TFmode Q->Q move.
     Therefore we need to raise the cost above 2 in order to have
     reload handle the situation.  */
  NAMED_PARAM (FP2FP, 4)
};
/* Generic costs for vector insn classes.  */
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct cpu_vector_cost generic_vector_cost =
{
  NAMED_PARAM (scalar_stmt_cost, 1),
  NAMED_PARAM (scalar_load_cost, 1),
  NAMED_PARAM (scalar_store_cost, 1),
  NAMED_PARAM (vec_stmt_cost, 1),
  NAMED_PARAM (vec_to_scalar_cost, 1),
  NAMED_PARAM (scalar_to_vec_cost, 1),
  NAMED_PARAM (vec_align_load_cost, 1),
  NAMED_PARAM (vec_unalign_load_cost, 1),
  NAMED_PARAM (vec_unalign_store_cost, 1),
  NAMED_PARAM (vec_store_cost, 1),
  NAMED_PARAM (cond_taken_branch_cost, 3),
  NAMED_PARAM (cond_not_taken_branch_cost, 1)
};
/* Costs for vector insn classes for Cortex-A57.  */
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct cpu_vector_cost cortexa57_vector_cost =
{
  NAMED_PARAM (scalar_stmt_cost, 1),
  NAMED_PARAM (scalar_load_cost, 4),
  NAMED_PARAM (scalar_store_cost, 1),
  NAMED_PARAM (vec_stmt_cost, 3),
  NAMED_PARAM (vec_to_scalar_cost, 8),
  NAMED_PARAM (scalar_to_vec_cost, 8),
  NAMED_PARAM (vec_align_load_cost, 5),
  NAMED_PARAM (vec_unalign_load_cost, 5),
  NAMED_PARAM (vec_unalign_store_cost, 1),
  NAMED_PARAM (vec_store_cost, 1),
  NAMED_PARAM (cond_taken_branch_cost, 1),
  NAMED_PARAM (cond_not_taken_branch_cost, 1)
};
#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
__extension__
#endif
static const struct tune_params generic_tunings =
{
  &cortexa57_extra_costs,
  &generic_addrcost_table,
  &generic_regmove_cost,
  &generic_vector_cost,
  NAMED_PARAM (memmov_cost, 4),
  NAMED_PARAM (issue_rate, 2)
};
static const struct tune_params cortexa53_tunings =
{
  &cortexa53_extra_costs,
  &generic_addrcost_table,
  &generic_regmove_cost,
  &generic_vector_cost,
  NAMED_PARAM (memmov_cost, 4),
  NAMED_PARAM (issue_rate, 2)
};
static const struct tune_params cortexa57_tunings =
{
  &cortexa57_extra_costs,
  &cortexa57_addrcost_table,
  &generic_regmove_cost,
  &cortexa57_vector_cost,
  NAMED_PARAM (memmov_cost, 4),
  NAMED_PARAM (issue_rate, 3)
};
/* A processor implementing AArch64.  */
struct processor
{
  const char *const name;
  enum aarch64_processor core;
  const char *arch;
  const unsigned long flags;
  const struct tune_params *const tune;
};
/* Processor cores implementing AArch64.  */
static const struct processor all_cores[] =
{
#define AARCH64_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
#include "aarch64-cores.def"
#undef AARCH64_CORE
  {"generic", cortexa53, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8,
   &generic_tunings},
  {NULL, aarch64_none, NULL, 0, NULL}
};
/* Architectures implementing AArch64.  */
static const struct processor all_architectures[] =
{
#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
  {NAME, CORE, #ARCH, FLAGS, NULL},
#include "aarch64-arches.def"
#undef AARCH64_ARCH
  {NULL, aarch64_none, NULL, 0, NULL}
};
/* Target specification.  These are populated as command-line arguments
   are processed, or NULL if not specified.  */
static const struct processor *selected_arch;
static const struct processor *selected_cpu;
static const struct processor *selected_tune;

#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
/* An ISA extension in the co-processor and main instruction set space.  */
struct aarch64_option_extension
{
  const char *const name;
  const unsigned long flags_on;
  const unsigned long flags_off;
};
/* ISA extensions in AArch64.  */
static const struct aarch64_option_extension all_extensions[] =
{
#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
  {NAME, FLAGS_ON, FLAGS_OFF},
#include "aarch64-option-extensions.def"
#undef AARCH64_OPT_EXTENSION
  {NULL, 0, 0}
};
/* Used to track the size of an address when generating a pre/post
   increment address.  */
static enum machine_mode aarch64_memory_reference_mode;

/* Used to force GTY into this file.  */
static GTY(()) int gty_dummy;
/* A table of valid AArch64 "bitmask immediate" values for
   logical instructions.  */

#define AARCH64_NUM_BITMASKS  5334
static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
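/* Illustrative note: a "bitmask immediate" is a run of ones, rotated and
   then replicated across 2-, 4-, 8-, 16-, 32- or 64-bit elements.  For
   example, 0x00ff00ff00ff00ff (eight ones replicated in 16-bit elements)
   is encodable, whereas 0x1234 is not; there are 5334 such 64-bit values
   in total, hence AARCH64_NUM_BITMASKS.  */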
typedef enum aarch64_cond_code
{
  AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
  AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
  AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
}
aarch64_cc;

#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
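/* Example: the codes are laid out in inverse pairs, so flipping the low
   bit maps AARCH64_EQ (0) to AARCH64_NE (1), AARCH64_CS (2) to
   AARCH64_CC (3), AARCH64_GE to AARCH64_LT, and so on.  */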
/* The condition codes of the processor, and the inverse function.  */
static const char * const aarch64_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
/* Provide a mapping from gcc register numbers to dwarf register numbers.  */
unsigned
aarch64_dbx_register_number (unsigned regno)
{
  if (GP_REGNUM_P (regno))
    return AARCH64_DWARF_R0 + regno - R0_REGNUM;
  else if (regno == SP_REGNUM)
    return AARCH64_DWARF_SP;
  else if (FP_REGNUM_P (regno))
    return AARCH64_DWARF_V0 + regno - V0_REGNUM;

  /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
     equivalent DWARF register.  */
  return DWARF_FRAME_REGISTERS;
}
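/* For example, x0...x30 map to DWARF registers 0...30, the stack pointer
   maps to 31, and v0...v31 map to 64...95, matching the AArch64 DWARF
   register numbering in the ABI.  */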
/* Return TRUE if MODE is any of the large INT modes.  */
static bool
aarch64_vect_struct_mode_p (enum machine_mode mode)
{
  return mode == OImode || mode == CImode || mode == XImode;
}

/* Return TRUE if MODE is any of the vector modes.  */
static bool
aarch64_vector_mode_p (enum machine_mode mode)
{
  return aarch64_vector_mode_supported_p (mode)
         || aarch64_vect_struct_mode_p (mode);
}
/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P.  */
static bool
aarch64_array_mode_supported_p (enum machine_mode mode,
                                unsigned HOST_WIDE_INT nelems)
{
  if (TARGET_SIMD
      && AARCH64_VALID_SIMD_QREG_MODE (mode)
      && (nelems >= 2 && nelems <= 4))
    return true;

  return false;
}
/* Implement HARD_REGNO_NREGS.  */

int
aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
{
  switch (aarch64_regno_regclass (regno))
    {
    case FP_REGS:
    case FP_LO_REGS:
      return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
    default:
      return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
    }
  gcc_unreachable ();
}
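/* For instance, a TImode value (16 bytes) needs two X registers
   (UNITS_PER_WORD is 8) but only one vector register (UNITS_PER_VREG is
   16), while an OImode value (32 bytes) needs two vector registers.  */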
/* Implement HARD_REGNO_MODE_OK.  */

int
aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return regno == CC_REGNUM;

  if (regno == SP_REGNUM)
    /* The purpose of comparing with ptr_mode is to support the
       global register variable associated with the stack pointer
       register via the syntax of asm ("wsp") in ILP32.  */
    return mode == Pmode || mode == ptr_mode;

  if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)
    return mode == Pmode;

  if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
    return 1;

  if (FP_REGNUM_P (regno))
    {
      if (aarch64_vect_struct_mode_p (mode))
        return
          (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
      else
        return 1;
    }

  return 0;
}
/* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
static enum machine_mode
aarch64_hard_regno_caller_save_mode (unsigned regno, unsigned nregs,
                                     enum machine_mode mode)
{
  /* Handle modes that fit within single registers.  */
  if (nregs == 1 && GET_MODE_SIZE (mode) <= 16)
    {
      if (GET_MODE_SIZE (mode) >= 4)
        return mode;
      else
        return SImode;
    }
  /* Fall back to generic for multi-reg and very large modes.  */
  else
    return choose_hard_reg_mode (regno, nregs, false);
}
/* Return true if calls to DECL should be treated as
   long-calls (i.e. called via a register).  */
static bool
aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
{
  return false;
}

/* Return true if calls to symbol-ref SYM should be treated as
   long-calls (i.e. called via a register).  */
bool
aarch64_is_long_call_p (rtx sym)
{
  return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
}
/* Return true if the offsets to a zero/sign-extract operation
   represent an expression that matches an extend operation.  The
   operands represent the parameters from

   (extract:MODE (mult (reg) (MULT_IMM)) (EXTRACT_IMM) (const_int 0)).  */
bool
aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
                                rtx extract_imm)
{
  HOST_WIDE_INT mult_val, extract_val;

  if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
    return false;

  mult_val = INTVAL (mult_imm);
  extract_val = INTVAL (extract_imm);

  if (extract_val > 8
      && extract_val < GET_MODE_BITSIZE (mode)
      && exact_log2 (extract_val & ~7) > 0
      && (extract_val & 7) <= 4
      && mult_val == (1 << (extract_val & 7)))
    return true;

  return false;
}
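/* Illustrative example (not from the original source): with MODE == DImode,
   an EXTRACT_IMM of 34 and a MULT_IMM of 4 satisfy the checks above
   (34 & ~7 == 32 is a power of two, 34 & 7 == 2, and 4 == 1 << 2), i.e. the
   extract behaves like an extend of a 32-bit value followed by a left
   shift of 2.  */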
/* Emit an insn that's a simple single-set.  Both the operands must be
   known to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for register 0 in the proper mode.  */
rtx
aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
{
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);

  emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
  return cc_reg;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

rtx
aarch64_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Return the TLS model to use for ADDR.  */

static enum tls_model
tls_symbolic_operand_type (rtx addr)
{
  enum tls_model tls_kind = TLS_MODEL_NONE;
  rtx sym, addend;

  if (GET_CODE (addr) == CONST)
    {
      split_const (addr, &sym, &addend);
      if (GET_CODE (sym) == SYMBOL_REF)
        tls_kind = SYMBOL_REF_TLS_MODEL (sym);
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    tls_kind = SYMBOL_REF_TLS_MODEL (addr);

  return tls_kind;
}
/* We'll allow lo_sum's in addresses in our legitimate addresses
   so that combine would take care of combining addresses where
   necessary, but for generation purposes, we'll generate the address
   as:

   RTL                               Absolute
   tmp = hi (symbol_ref);            adrp  x1, foo
   dest = lo_sum (tmp, symbol_ref);  add   dest, x1, :lo_12:foo

   PIC                               TLS
   adrp x1, :got:foo                 adrp  tmp, :tlsgd:foo
   ldr  x1, [:got_lo12:foo]          add   dest, tmp, :tlsgd_lo12:foo
                                     bl    __tls_get_addr

   Load TLS symbol, depending on TLS mechanism and TLS access model.

   Global Dynamic - Traditional TLS:
   adrp tmp, :tlsgd:imm
   add  dest, tmp, #:tlsgd_lo12:imm
   bl   __tls_get_addr

   Global Dynamic - TLS Descriptors:
   adrp dest, :tlsdesc:imm
   ldr  tmp, [dest, #:tlsdesc_lo12:imm]
   add  dest, dest, #:tlsdesc_lo12:imm
   blr  tmp

   Initial Exec:
   mrs  tp, tpidr_el0
   adrp tmp, :gottprel:imm
   ldr  dest, [tmp, #:gottprel_lo12:imm]
   add  dest, dest, tp

   Local Exec:
   mrs  tp, tpidr_el0
   add  t0, tp, #:tprel_hi12:imm
   add  t0, #:tprel_lo12_nc:imm  */
635 aarch64_load_symref_appropriately (rtx dest
, rtx imm
,
636 enum aarch64_symbol_type type
)
640 case SYMBOL_SMALL_ABSOLUTE
:
642 /* In ILP32, the mode of dest can be either SImode or DImode. */
644 enum machine_mode mode
= GET_MODE (dest
);
646 gcc_assert (mode
== Pmode
|| mode
== ptr_mode
);
648 if (can_create_pseudo_p ())
649 tmp_reg
= gen_reg_rtx (mode
);
651 emit_move_insn (tmp_reg
, gen_rtx_HIGH (mode
, imm
));
652 emit_insn (gen_add_losym (dest
, tmp_reg
, imm
));
656 case SYMBOL_TINY_ABSOLUTE
:
657 emit_insn (gen_rtx_SET (Pmode
, dest
, imm
));
660 case SYMBOL_SMALL_GOT
:
662 /* In ILP32, the mode of dest can be either SImode or DImode,
663 while the got entry is always of SImode size. The mode of
664 dest depends on how dest is used: if dest is assigned to a
665 pointer (e.g. in the memory), it has SImode; it may have
666 DImode if dest is dereferenced to access the memeory.
667 This is why we have to handle three different ldr_got_small
668 patterns here (two patterns for ILP32). */
670 enum machine_mode mode
= GET_MODE (dest
);
672 if (can_create_pseudo_p ())
673 tmp_reg
= gen_reg_rtx (mode
);
675 emit_move_insn (tmp_reg
, gen_rtx_HIGH (mode
, imm
));
676 if (mode
== ptr_mode
)
679 emit_insn (gen_ldr_got_small_di (dest
, tmp_reg
, imm
));
681 emit_insn (gen_ldr_got_small_si (dest
, tmp_reg
, imm
));
685 gcc_assert (mode
== Pmode
);
686 emit_insn (gen_ldr_got_small_sidi (dest
, tmp_reg
, imm
));
692 case SYMBOL_SMALL_TLSGD
:
695 rtx result
= gen_rtx_REG (Pmode
, R0_REGNUM
);
698 aarch64_emit_call_insn (gen_tlsgd_small (result
, imm
));
699 insns
= get_insns ();
702 RTL_CONST_CALL_P (insns
) = 1;
703 emit_libcall_block (insns
, dest
, result
, imm
);
707 case SYMBOL_SMALL_TLSDESC
:
709 enum machine_mode mode
= GET_MODE (dest
);
710 rtx x0
= gen_rtx_REG (mode
, R0_REGNUM
);
713 gcc_assert (mode
== Pmode
|| mode
== ptr_mode
);
715 /* In ILP32, the got entry is always of SImode size. Unlike
716 small GOT, the dest is fixed at reg 0. */
718 emit_insn (gen_tlsdesc_small_si (imm
));
720 emit_insn (gen_tlsdesc_small_di (imm
));
721 tp
= aarch64_load_tp (NULL
);
724 tp
= gen_lowpart (mode
, tp
);
726 emit_insn (gen_rtx_SET (mode
, dest
, gen_rtx_PLUS (mode
, tp
, x0
)));
727 set_unique_reg_note (get_last_insn (), REG_EQUIV
, imm
);
731 case SYMBOL_SMALL_GOTTPREL
:
733 /* In ILP32, the mode of dest can be either SImode or DImode,
734 while the got entry is always of SImode size. The mode of
735 dest depends on how dest is used: if dest is assigned to a
736 pointer (e.g. in the memory), it has SImode; it may have
737 DImode if dest is dereferenced to access the memeory.
738 This is why we have to handle three different tlsie_small
739 patterns here (two patterns for ILP32). */
740 enum machine_mode mode
= GET_MODE (dest
);
741 rtx tmp_reg
= gen_reg_rtx (mode
);
742 rtx tp
= aarch64_load_tp (NULL
);
744 if (mode
== ptr_mode
)
747 emit_insn (gen_tlsie_small_di (tmp_reg
, imm
));
750 emit_insn (gen_tlsie_small_si (tmp_reg
, imm
));
751 tp
= gen_lowpart (mode
, tp
);
756 gcc_assert (mode
== Pmode
);
757 emit_insn (gen_tlsie_small_sidi (tmp_reg
, imm
));
760 emit_insn (gen_rtx_SET (mode
, dest
, gen_rtx_PLUS (mode
, tp
, tmp_reg
)));
761 set_unique_reg_note (get_last_insn (), REG_EQUIV
, imm
);
765 case SYMBOL_SMALL_TPREL
:
767 rtx tp
= aarch64_load_tp (NULL
);
768 emit_insn (gen_tlsle_small (dest
, tp
, imm
));
769 set_unique_reg_note (get_last_insn (), REG_EQUIV
, imm
);
773 case SYMBOL_TINY_GOT
:
774 emit_insn (gen_ldr_got_tiny (dest
, imm
));
/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */
static rtx
aarch64_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
          ? emit_move_insn (dest, src)
          : emit_move_insn_1 (dest, src));
}
795 /* Split a 128-bit move operation into two 64-bit move operations,
796 taking care to handle partial overlap of register to register
797 copies. Special cases are needed when moving between GP regs and
798 FP regs. SRC can be a register, constant or memory; DST a register
799 or memory. If either operand is memory it must not have any side
802 aarch64_split_128bit_move (rtx dst
, rtx src
)
807 enum machine_mode mode
= GET_MODE (dst
);
809 gcc_assert (mode
== TImode
|| mode
== TFmode
);
810 gcc_assert (!(side_effects_p (src
) || side_effects_p (dst
)));
811 gcc_assert (mode
== GET_MODE (src
) || GET_MODE (src
) == VOIDmode
);
813 if (REG_P (dst
) && REG_P (src
))
815 int src_regno
= REGNO (src
);
816 int dst_regno
= REGNO (dst
);
818 /* Handle FP <-> GP regs. */
819 if (FP_REGNUM_P (dst_regno
) && GP_REGNUM_P (src_regno
))
821 src_lo
= gen_lowpart (word_mode
, src
);
822 src_hi
= gen_highpart (word_mode
, src
);
826 emit_insn (gen_aarch64_movtilow_di (dst
, src_lo
));
827 emit_insn (gen_aarch64_movtihigh_di (dst
, src_hi
));
831 emit_insn (gen_aarch64_movtflow_di (dst
, src_lo
));
832 emit_insn (gen_aarch64_movtfhigh_di (dst
, src_hi
));
836 else if (GP_REGNUM_P (dst_regno
) && FP_REGNUM_P (src_regno
))
838 dst_lo
= gen_lowpart (word_mode
, dst
);
839 dst_hi
= gen_highpart (word_mode
, dst
);
843 emit_insn (gen_aarch64_movdi_tilow (dst_lo
, src
));
844 emit_insn (gen_aarch64_movdi_tihigh (dst_hi
, src
));
848 emit_insn (gen_aarch64_movdi_tflow (dst_lo
, src
));
849 emit_insn (gen_aarch64_movdi_tfhigh (dst_hi
, src
));
855 dst_lo
= gen_lowpart (word_mode
, dst
);
856 dst_hi
= gen_highpart (word_mode
, dst
);
857 src_lo
= gen_lowpart (word_mode
, src
);
858 src_hi
= gen_highpart_mode (word_mode
, mode
, src
);
860 /* At most one pairing may overlap. */
861 if (reg_overlap_mentioned_p (dst_lo
, src_hi
))
863 aarch64_emit_move (dst_hi
, src_hi
);
864 aarch64_emit_move (dst_lo
, src_lo
);
868 aarch64_emit_move (dst_lo
, src_lo
);
869 aarch64_emit_move (dst_hi
, src_hi
);
static bool
aarch64_split_128bit_move_p (rtx dst, rtx src)
{
  return (! REG_P (src)
          || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
}
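/* In other words, the only 128-bit copy that is left whole is a
   register-to-register move where both operands live in the FP/SIMD
   register file (a single move of a Q register); every other
   TImode/TFmode move is split into two 64-bit halves by
   aarch64_split_128bit_move.  */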
880 /* Split a complex SIMD combine. */
883 aarch64_split_simd_combine (rtx dst
, rtx src1
, rtx src2
)
885 enum machine_mode src_mode
= GET_MODE (src1
);
886 enum machine_mode dst_mode
= GET_MODE (dst
);
888 gcc_assert (VECTOR_MODE_P (dst_mode
));
890 if (REG_P (dst
) && REG_P (src1
) && REG_P (src2
))
892 rtx (*gen
) (rtx
, rtx
, rtx
);
897 gen
= gen_aarch64_simd_combinev8qi
;
900 gen
= gen_aarch64_simd_combinev4hi
;
903 gen
= gen_aarch64_simd_combinev2si
;
906 gen
= gen_aarch64_simd_combinev2sf
;
909 gen
= gen_aarch64_simd_combinedi
;
912 gen
= gen_aarch64_simd_combinedf
;
918 emit_insn (gen (dst
, src1
, src2
));
923 /* Split a complex SIMD move. */
926 aarch64_split_simd_move (rtx dst
, rtx src
)
928 enum machine_mode src_mode
= GET_MODE (src
);
929 enum machine_mode dst_mode
= GET_MODE (dst
);
931 gcc_assert (VECTOR_MODE_P (dst_mode
));
933 if (REG_P (dst
) && REG_P (src
))
935 rtx (*gen
) (rtx
, rtx
);
937 gcc_assert (VECTOR_MODE_P (src_mode
));
942 gen
= gen_aarch64_split_simd_movv16qi
;
945 gen
= gen_aarch64_split_simd_movv8hi
;
948 gen
= gen_aarch64_split_simd_movv4si
;
951 gen
= gen_aarch64_split_simd_movv2di
;
954 gen
= gen_aarch64_split_simd_movv4sf
;
957 gen
= gen_aarch64_split_simd_movv2df
;
963 emit_insn (gen (dst
, src
));
static rtx
aarch64_force_temporary (enum machine_mode mode, rtx x, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (mode, value);
  else
    {
      x = aarch64_emit_move (x, value);
      return x;
    }
}
static rtx
aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg,
                    HOST_WIDE_INT offset)
{
  if (!aarch64_plus_immediate (GEN_INT (offset), mode))
    {
      rtx high;
      /* Load the full offset into a register.  This
         might be improvable in the future.  */
      high = GEN_INT (offset);
      offset = 0;
      high = aarch64_force_temporary (mode, temp, high);
      reg = aarch64_force_temporary (mode, temp,
                                     gen_rtx_PLUS (mode, high, reg));
    }
  return plus_constant (mode, reg, offset);
}
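/* Example: an offset such as 0x123456 cannot be encoded as a single
   add-immediate, so it is first materialised in a temporary register and
   added to REG, leaving a zero residual offset for plus_constant; a small
   offset such as 16 is folded directly into the returned PLUS.  */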
999 aarch64_expand_mov_immediate (rtx dest
, rtx imm
)
1001 enum machine_mode mode
= GET_MODE (dest
);
1002 unsigned HOST_WIDE_INT mask
;
1005 unsigned HOST_WIDE_INT val
;
1008 int one_match
, zero_match
, first_not_ffff_match
;
1010 gcc_assert (mode
== SImode
|| mode
== DImode
);
1012 /* Check on what type of symbol it is. */
1013 if (GET_CODE (imm
) == SYMBOL_REF
1014 || GET_CODE (imm
) == LABEL_REF
1015 || GET_CODE (imm
) == CONST
)
1017 rtx mem
, base
, offset
;
1018 enum aarch64_symbol_type sty
;
1020 /* If we have (const (plus symbol offset)), separate out the offset
1021 before we start classifying the symbol. */
1022 split_const (imm
, &base
, &offset
);
1024 sty
= aarch64_classify_symbol (base
, SYMBOL_CONTEXT_ADR
);
1027 case SYMBOL_FORCE_TO_MEM
:
1028 if (offset
!= const0_rtx
1029 && targetm
.cannot_force_const_mem (mode
, imm
))
1031 gcc_assert (can_create_pseudo_p ());
1032 base
= aarch64_force_temporary (mode
, dest
, base
);
1033 base
= aarch64_add_offset (mode
, NULL
, base
, INTVAL (offset
));
1034 aarch64_emit_move (dest
, base
);
1037 mem
= force_const_mem (ptr_mode
, imm
);
1039 if (mode
!= ptr_mode
)
1040 mem
= gen_rtx_ZERO_EXTEND (mode
, mem
);
1041 emit_insn (gen_rtx_SET (VOIDmode
, dest
, mem
));
1044 case SYMBOL_SMALL_TLSGD
:
1045 case SYMBOL_SMALL_TLSDESC
:
1046 case SYMBOL_SMALL_GOTTPREL
:
1047 case SYMBOL_SMALL_GOT
:
1048 case SYMBOL_TINY_GOT
:
1049 if (offset
!= const0_rtx
)
1051 gcc_assert(can_create_pseudo_p ());
1052 base
= aarch64_force_temporary (mode
, dest
, base
);
1053 base
= aarch64_add_offset (mode
, NULL
, base
, INTVAL (offset
));
1054 aarch64_emit_move (dest
, base
);
1059 case SYMBOL_SMALL_TPREL
:
1060 case SYMBOL_SMALL_ABSOLUTE
:
1061 case SYMBOL_TINY_ABSOLUTE
:
1062 aarch64_load_symref_appropriately (dest
, imm
, sty
);
1070 if (CONST_INT_P (imm
) && aarch64_move_imm (INTVAL (imm
), mode
))
1072 emit_insn (gen_rtx_SET (VOIDmode
, dest
, imm
));
1076 if (!CONST_INT_P (imm
))
1078 if (GET_CODE (imm
) == HIGH
)
1079 emit_insn (gen_rtx_SET (VOIDmode
, dest
, imm
));
1082 rtx mem
= force_const_mem (mode
, imm
);
1084 emit_insn (gen_rtx_SET (VOIDmode
, dest
, mem
));
1092 /* We know we can't do this in 1 insn, and we must be able to do it
1093 in two; so don't mess around looking for sequences that don't buy
1095 emit_insn (gen_rtx_SET (VOIDmode
, dest
, GEN_INT (INTVAL (imm
) & 0xffff)));
1096 emit_insn (gen_insv_immsi (dest
, GEN_INT (16),
1097 GEN_INT ((INTVAL (imm
) >> 16) & 0xffff)));
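/* For example, an SImode constant such as 0x12345678 is expanded as a MOV
   of the low half (0x5678) followed by an insert of the high half (0x1234)
   at bit position 16 (a MOVK-style operation), which is exactly the
   two-instruction sequence emitted above.  */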
1101 /* Remaining cases are all for DImode. */
1104 subtargets
= optimize
&& can_create_pseudo_p ();
1109 first_not_ffff_match
= -1;
1111 for (i
= 0; i
< 64; i
+= 16, mask
<<= 16)
1113 if ((val
& mask
) == mask
)
1117 if (first_not_ffff_match
< 0)
1118 first_not_ffff_match
= i
;
1119 if ((val
& mask
) == 0)
1126 /* Set one of the quarters and then insert back into result. */
1127 mask
= 0xffffll
<< first_not_ffff_match
;
1128 emit_insn (gen_rtx_SET (VOIDmode
, dest
, GEN_INT (val
| mask
)));
1129 emit_insn (gen_insv_immdi (dest
, GEN_INT (first_not_ffff_match
),
1130 GEN_INT ((val
>> first_not_ffff_match
)
1135 if (zero_match
== 2)
1136 goto simple_sequence
;
1138 mask
= 0x0ffff0000UL
;
1139 for (i
= 16; i
< 64; i
+= 16, mask
<<= 16)
1141 HOST_WIDE_INT comp
= mask
& ~(mask
- 1);
1143 if (aarch64_uimm12_shift (val
- (val
& mask
)))
1145 subtarget
= subtargets
? gen_reg_rtx (DImode
) : dest
;
1147 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, GEN_INT (val
& mask
)));
1148 emit_insn (gen_adddi3 (dest
, subtarget
,
1149 GEN_INT (val
- (val
& mask
))));
1152 else if (aarch64_uimm12_shift (-(val
- ((val
+ comp
) & mask
))))
1154 subtarget
= subtargets
? gen_reg_rtx (DImode
) : dest
;
1156 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1157 GEN_INT ((val
+ comp
) & mask
)));
1158 emit_insn (gen_adddi3 (dest
, subtarget
,
1159 GEN_INT (val
- ((val
+ comp
) & mask
))));
1162 else if (aarch64_uimm12_shift (val
- ((val
- comp
) | ~mask
)))
1164 subtarget
= subtargets
? gen_reg_rtx (DImode
) : dest
;
1166 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1167 GEN_INT ((val
- comp
) | ~mask
)));
1168 emit_insn (gen_adddi3 (dest
, subtarget
,
1169 GEN_INT (val
- ((val
- comp
) | ~mask
))));
1172 else if (aarch64_uimm12_shift (-(val
- (val
| ~mask
))))
1174 subtarget
= subtargets
? gen_reg_rtx (DImode
) : dest
;
1176 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1177 GEN_INT (val
| ~mask
)));
1178 emit_insn (gen_adddi3 (dest
, subtarget
,
1179 GEN_INT (val
- (val
| ~mask
))));
1184 /* See if we can do it by arithmetically combining two
1186 for (i
= 0; i
< AARCH64_NUM_BITMASKS
; i
++)
1191 if (aarch64_uimm12_shift (val
- aarch64_bitmasks
[i
])
1192 || aarch64_uimm12_shift (-val
+ aarch64_bitmasks
[i
]))
1194 subtarget
= subtargets
? gen_reg_rtx (DImode
) : dest
;
1195 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1196 GEN_INT (aarch64_bitmasks
[i
])));
1197 emit_insn (gen_adddi3 (dest
, subtarget
,
1198 GEN_INT (val
- aarch64_bitmasks
[i
])));
1202 for (j
= 0; j
< 64; j
+= 16, mask
<<= 16)
1204 if ((aarch64_bitmasks
[i
] & ~mask
) == (val
& ~mask
))
1206 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
1207 GEN_INT (aarch64_bitmasks
[i
])));
1208 emit_insn (gen_insv_immdi (dest
, GEN_INT (j
),
1209 GEN_INT ((val
>> j
) & 0xffff)));
1215 /* See if we can do it by logically combining two immediates. */
1216 for (i
= 0; i
< AARCH64_NUM_BITMASKS
; i
++)
1218 if ((aarch64_bitmasks
[i
] & val
) == aarch64_bitmasks
[i
])
1222 for (j
= i
+ 1; j
< AARCH64_NUM_BITMASKS
; j
++)
1223 if (val
== (aarch64_bitmasks
[i
] | aarch64_bitmasks
[j
]))
1225 subtarget
= subtargets
? gen_reg_rtx (mode
) : dest
;
1226 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1227 GEN_INT (aarch64_bitmasks
[i
])));
1228 emit_insn (gen_iordi3 (dest
, subtarget
,
1229 GEN_INT (aarch64_bitmasks
[j
])));
1233 else if ((val
& aarch64_bitmasks
[i
]) == val
)
1237 for (j
= i
+ 1; j
< AARCH64_NUM_BITMASKS
; j
++)
1238 if (val
== (aarch64_bitmasks
[j
] & aarch64_bitmasks
[i
]))
1241 subtarget
= subtargets
? gen_reg_rtx (mode
) : dest
;
1242 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
,
1243 GEN_INT (aarch64_bitmasks
[j
])));
1244 emit_insn (gen_anddi3 (dest
, subtarget
,
1245 GEN_INT (aarch64_bitmasks
[i
])));
1251 if (one_match
> zero_match
)
1253 /* Set either first three quarters or all but the third. */
1254 mask
= 0xffffll
<< (16 - first_not_ffff_match
);
1255 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
1256 GEN_INT (val
| mask
| 0xffffffff00000000ull
)));
1258 /* Now insert other two quarters. */
1259 for (i
= first_not_ffff_match
+ 16, mask
<<= (first_not_ffff_match
<< 1);
1260 i
< 64; i
+= 16, mask
<<= 16)
1262 if ((val
& mask
) != mask
)
1263 emit_insn (gen_insv_immdi (dest
, GEN_INT (i
),
1264 GEN_INT ((val
>> i
) & 0xffff)));
1272 for (i
= 0; i
< 64; i
+= 16, mask
<<= 16)
1274 if ((val
& mask
) != 0)
1278 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
1279 GEN_INT (val
& mask
)));
1283 emit_insn (gen_insv_immdi (dest
, GEN_INT (i
),
1284 GEN_INT ((val
>> i
) & 0xffff)));
1290 aarch64_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED
,
1291 tree exp ATTRIBUTE_UNUSED
)
1293 /* Currently, always true. */
1297 /* Implement TARGET_PASS_BY_REFERENCE. */
1300 aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED
,
1301 enum machine_mode mode
,
1303 bool named ATTRIBUTE_UNUSED
)
1306 enum machine_mode dummymode
;
1309 /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
1310 size
= (mode
== BLKmode
&& type
)
1311 ? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
1313 /* Aggregates are passed by reference based on their size. */
1314 if (type
&& AGGREGATE_TYPE_P (type
))
1316 size
= int_size_in_bytes (type
);
1319 /* Variable sized arguments are always returned by reference. */
1323 /* Can this be a candidate to be passed in fp/simd register(s)? */
1324 if (aarch64_vfp_is_call_or_return_candidate (mode
, type
,
1329 /* Arguments which are variable sized or larger than 2 registers are
1330 passed by reference unless they are a homogenous floating point
1332 return size
> 2 * UNITS_PER_WORD
;
1335 /* Return TRUE if VALTYPE is padded to its least significant bits. */
1337 aarch64_return_in_msb (const_tree valtype
)
1339 enum machine_mode dummy_mode
;
1342 /* Never happens in little-endian mode. */
1343 if (!BYTES_BIG_ENDIAN
)
1346 /* Only composite types smaller than or equal to 16 bytes can
1347 be potentially returned in registers. */
1348 if (!aarch64_composite_type_p (valtype
, TYPE_MODE (valtype
))
1349 || int_size_in_bytes (valtype
) <= 0
1350 || int_size_in_bytes (valtype
) > 16)
1353 /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
1354 or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
1355 is always passed/returned in the least significant bits of fp/simd
1357 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype
), valtype
,
1358 &dummy_mode
, &dummy_int
, NULL
))
1364 /* Implement TARGET_FUNCTION_VALUE.
1365 Define how to find the value returned by a function. */
1368 aarch64_function_value (const_tree type
, const_tree func
,
1369 bool outgoing ATTRIBUTE_UNUSED
)
1371 enum machine_mode mode
;
1374 enum machine_mode ag_mode
;
1376 mode
= TYPE_MODE (type
);
1377 if (INTEGRAL_TYPE_P (type
))
1378 mode
= promote_function_mode (type
, mode
, &unsignedp
, func
, 1);
1380 if (aarch64_return_in_msb (type
))
1382 HOST_WIDE_INT size
= int_size_in_bytes (type
);
1384 if (size
% UNITS_PER_WORD
!= 0)
1386 size
+= UNITS_PER_WORD
- size
% UNITS_PER_WORD
;
1387 mode
= mode_for_size (size
* BITS_PER_UNIT
, MODE_INT
, 0);
1391 if (aarch64_vfp_is_call_or_return_candidate (mode
, type
,
1392 &ag_mode
, &count
, NULL
))
1394 if (!aarch64_composite_type_p (type
, mode
))
1396 gcc_assert (count
== 1 && mode
== ag_mode
);
1397 return gen_rtx_REG (mode
, V0_REGNUM
);
1404 par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (count
));
1405 for (i
= 0; i
< count
; i
++)
1407 rtx tmp
= gen_rtx_REG (ag_mode
, V0_REGNUM
+ i
);
1408 tmp
= gen_rtx_EXPR_LIST (VOIDmode
, tmp
,
1409 GEN_INT (i
* GET_MODE_SIZE (ag_mode
)));
1410 XVECEXP (par
, 0, i
) = tmp
;
1416 return gen_rtx_REG (mode
, R0_REGNUM
);
1419 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.
1420 Return true if REGNO is the number of a hard register in which the values
1421 of called function may come back. */
1424 aarch64_function_value_regno_p (const unsigned int regno
)
1426 /* Maximum of 16 bytes can be returned in the general registers. Examples
1427 of 16-byte return values are: 128-bit integers and 16-byte small
1428 structures (excluding homogeneous floating-point aggregates). */
1429 if (regno
== R0_REGNUM
|| regno
== R1_REGNUM
)
1432 /* Up to four fp/simd registers can return a function value, e.g. a
1433 homogeneous floating-point aggregate having four members. */
1434 if (regno
>= V0_REGNUM
&& regno
< V0_REGNUM
+ HA_MAX_NUM_FLDS
)
1435 return !TARGET_GENERAL_REGS_ONLY
;
1440 /* Implement TARGET_RETURN_IN_MEMORY.
1442 If the type T of the result of a function is such that
1444 would require that arg be passed as a value in a register (or set of
1445 registers) according to the parameter passing rules, then the result
1446 is returned in the same registers as would be used for such an
1450 aarch64_return_in_memory (const_tree type
, const_tree fndecl ATTRIBUTE_UNUSED
)
1453 enum machine_mode ag_mode
;
1456 if (!AGGREGATE_TYPE_P (type
)
1457 && TREE_CODE (type
) != COMPLEX_TYPE
1458 && TREE_CODE (type
) != VECTOR_TYPE
)
1459 /* Simple scalar types always returned in registers. */
1462 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type
),
1469 /* Types larger than 2 registers returned in memory. */
1470 size
= int_size_in_bytes (type
);
1471 return (size
< 0 || size
> 2 * UNITS_PER_WORD
);
1475 aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v
, enum machine_mode mode
,
1476 const_tree type
, int *nregs
)
1478 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1479 return aarch64_vfp_is_call_or_return_candidate (mode
,
1481 &pcum
->aapcs_vfp_rmode
,
1486 /* Given MODE and TYPE of a function argument, return the alignment in
1487 bits. The idea is to suppress any stronger alignment requested by
1488 the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
1489 This is a helper function for local use only. */
1492 aarch64_function_arg_alignment (enum machine_mode mode
, const_tree type
)
1494 unsigned int alignment
;
1498 if (!integer_zerop (TYPE_SIZE (type
)))
1500 if (TYPE_MODE (type
) == mode
)
1501 alignment
= TYPE_ALIGN (type
);
1503 alignment
= GET_MODE_ALIGNMENT (mode
);
1509 alignment
= GET_MODE_ALIGNMENT (mode
);
1514 /* Layout a function argument according to the AAPCS64 rules. The rule
1515 numbers refer to the rule numbers in the AAPCS64. */
1518 aarch64_layout_arg (cumulative_args_t pcum_v
, enum machine_mode mode
,
1520 bool named ATTRIBUTE_UNUSED
)
1522 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1523 int ncrn
, nvrn
, nregs
;
1524 bool allocate_ncrn
, allocate_nvrn
;
1527 /* We need to do this once per argument. */
1528 if (pcum
->aapcs_arg_processed
)
1531 pcum
->aapcs_arg_processed
= true;
1533 /* Size in bytes, rounded to the nearest multiple of 8 bytes. */
1535 = AARCH64_ROUND_UP (type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
),
1538 allocate_ncrn
= (type
) ? !(FLOAT_TYPE_P (type
)) : !FLOAT_MODE_P (mode
);
1539 allocate_nvrn
= aarch64_vfp_is_call_candidate (pcum_v
,
1544 /* allocate_ncrn may be false-positive, but allocate_nvrn is quite reliable.
1545 The following code thus handles passing by SIMD/FP registers first. */
1547 nvrn
= pcum
->aapcs_nvrn
;
1549 /* C1 - C5 for floating point, homogenous floating point aggregates (HFA)
1550 and homogenous short-vector aggregates (HVA). */
1553 if (nvrn
+ nregs
<= NUM_FP_ARG_REGS
)
1555 pcum
->aapcs_nextnvrn
= nvrn
+ nregs
;
1556 if (!aarch64_composite_type_p (type
, mode
))
1558 gcc_assert (nregs
== 1);
1559 pcum
->aapcs_reg
= gen_rtx_REG (mode
, V0_REGNUM
+ nvrn
);
1565 par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (nregs
));
1566 for (i
= 0; i
< nregs
; i
++)
1568 rtx tmp
= gen_rtx_REG (pcum
->aapcs_vfp_rmode
,
1569 V0_REGNUM
+ nvrn
+ i
);
1570 tmp
= gen_rtx_EXPR_LIST
1572 GEN_INT (i
* GET_MODE_SIZE (pcum
->aapcs_vfp_rmode
)));
1573 XVECEXP (par
, 0, i
) = tmp
;
1575 pcum
->aapcs_reg
= par
;
1581 /* C.3 NSRN is set to 8. */
1582 pcum
->aapcs_nextnvrn
= NUM_FP_ARG_REGS
;
1587 ncrn
= pcum
->aapcs_ncrn
;
1588 nregs
= size
/ UNITS_PER_WORD
;
1590 /* C6 - C9. though the sign and zero extension semantics are
1591 handled elsewhere. This is the case where the argument fits
1592 entirely general registers. */
1593 if (allocate_ncrn
&& (ncrn
+ nregs
<= NUM_ARG_REGS
))
1595 unsigned int alignment
= aarch64_function_arg_alignment (mode
, type
);
1597 gcc_assert (nregs
== 0 || nregs
== 1 || nregs
== 2);
1599 /* C.8 if the argument has an alignment of 16 then the NGRN is
1600 rounded up to the next even number. */
1601 if (nregs
== 2 && alignment
== 16 * BITS_PER_UNIT
&& ncrn
% 2)
1604 gcc_assert (ncrn
+ nregs
<= NUM_ARG_REGS
);
1606 /* NREGS can be 0 when e.g. an empty structure is to be passed.
1607 A reg is still generated for it, but the caller should be smart
1608 enough not to use it. */
1609 if (nregs
== 0 || nregs
== 1 || GET_MODE_CLASS (mode
) == MODE_INT
)
1611 pcum
->aapcs_reg
= gen_rtx_REG (mode
, R0_REGNUM
+ ncrn
);
1618 par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (nregs
));
1619 for (i
= 0; i
< nregs
; i
++)
1621 rtx tmp
= gen_rtx_REG (word_mode
, R0_REGNUM
+ ncrn
+ i
);
1622 tmp
= gen_rtx_EXPR_LIST (VOIDmode
, tmp
,
1623 GEN_INT (i
* UNITS_PER_WORD
));
1624 XVECEXP (par
, 0, i
) = tmp
;
1626 pcum
->aapcs_reg
= par
;
1629 pcum
->aapcs_nextncrn
= ncrn
+ nregs
;
1634 pcum
->aapcs_nextncrn
= NUM_ARG_REGS
;
1636 /* The argument is passed on stack; record the needed number of words for
1637 this argument and align the total size if necessary. */
1639 pcum
->aapcs_stack_words
= size
/ UNITS_PER_WORD
;
1640 if (aarch64_function_arg_alignment (mode
, type
) == 16 * BITS_PER_UNIT
)
1641 pcum
->aapcs_stack_size
= AARCH64_ROUND_UP (pcum
->aapcs_stack_size
,
1642 16 / UNITS_PER_WORD
);
1646 /* Implement TARGET_FUNCTION_ARG. */
1649 aarch64_function_arg (cumulative_args_t pcum_v
, enum machine_mode mode
,
1650 const_tree type
, bool named
)
1652 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1653 gcc_assert (pcum
->pcs_variant
== ARM_PCS_AAPCS64
);
1655 if (mode
== VOIDmode
)
1658 aarch64_layout_arg (pcum_v
, mode
, type
, named
);
1659 return pcum
->aapcs_reg
;
1663 aarch64_init_cumulative_args (CUMULATIVE_ARGS
*pcum
,
1664 const_tree fntype ATTRIBUTE_UNUSED
,
1665 rtx libname ATTRIBUTE_UNUSED
,
1666 const_tree fndecl ATTRIBUTE_UNUSED
,
1667 unsigned n_named ATTRIBUTE_UNUSED
)
1669 pcum
->aapcs_ncrn
= 0;
1670 pcum
->aapcs_nvrn
= 0;
1671 pcum
->aapcs_nextncrn
= 0;
1672 pcum
->aapcs_nextnvrn
= 0;
1673 pcum
->pcs_variant
= ARM_PCS_AAPCS64
;
1674 pcum
->aapcs_reg
= NULL_RTX
;
1675 pcum
->aapcs_arg_processed
= false;
1676 pcum
->aapcs_stack_words
= 0;
1677 pcum
->aapcs_stack_size
= 0;
1683 aarch64_function_arg_advance (cumulative_args_t pcum_v
,
1684 enum machine_mode mode
,
1688 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1689 if (pcum
->pcs_variant
== ARM_PCS_AAPCS64
)
1691 aarch64_layout_arg (pcum_v
, mode
, type
, named
);
1692 gcc_assert ((pcum
->aapcs_reg
!= NULL_RTX
)
1693 != (pcum
->aapcs_stack_words
!= 0));
1694 pcum
->aapcs_arg_processed
= false;
1695 pcum
->aapcs_ncrn
= pcum
->aapcs_nextncrn
;
1696 pcum
->aapcs_nvrn
= pcum
->aapcs_nextnvrn
;
1697 pcum
->aapcs_stack_size
+= pcum
->aapcs_stack_words
;
1698 pcum
->aapcs_stack_words
= 0;
1699 pcum
->aapcs_reg
= NULL_RTX
;
1704 aarch64_function_arg_regno_p (unsigned regno
)
1706 return ((GP_REGNUM_P (regno
) && regno
< R0_REGNUM
+ NUM_ARG_REGS
)
1707 || (FP_REGNUM_P (regno
) && regno
< V0_REGNUM
+ NUM_FP_ARG_REGS
));
1710 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
1711 PARM_BOUNDARY bits of alignment, but will be given anything up
1712 to STACK_BOUNDARY bits if the type requires it. This makes sure
1713 that both before and after the layout of each argument, the Next
1714 Stacked Argument Address (NSAA) will have a minimum alignment of
1718 aarch64_function_arg_boundary (enum machine_mode mode
, const_tree type
)
1720 unsigned int alignment
= aarch64_function_arg_alignment (mode
, type
);
1722 if (alignment
< PARM_BOUNDARY
)
1723 alignment
= PARM_BOUNDARY
;
1724 if (alignment
> STACK_BOUNDARY
)
1725 alignment
= STACK_BOUNDARY
;
1729 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
1731 Return true if an argument passed on the stack should be padded upwards,
1732 i.e. if the least-significant byte of the stack slot has useful data.
1734 Small aggregate types are placed in the lowest memory address.
1736 The related parameter passing rules are B.4, C.3, C.5 and C.14. */
1739 aarch64_pad_arg_upward (enum machine_mode mode
, const_tree type
)
1741 /* On little-endian targets, the least significant byte of every stack
1742 argument is passed at the lowest byte address of the stack slot. */
1743 if (!BYTES_BIG_ENDIAN
)
1746 /* Otherwise, integral, floating-point and pointer types are padded downward:
1747 the least significant byte of a stack argument is passed at the highest
1748 byte address of the stack slot. */
1750 ? (INTEGRAL_TYPE_P (type
) || SCALAR_FLOAT_TYPE_P (type
)
1751 || POINTER_TYPE_P (type
))
1752 : (SCALAR_INT_MODE_P (mode
) || SCALAR_FLOAT_MODE_P (mode
)))
1755 /* Everything else padded upward, i.e. data in first byte of stack slot. */
1759 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
1761 It specifies padding for the last (may also be the only)
1762 element of a block move between registers and memory. If
1763 assuming the block is in the memory, padding upward means that
1764 the last element is padded after its highest significant byte,
1765 while in downward padding, the last element is padded at the
1766 its least significant byte side.
1768 Small aggregates and small complex types are always padded
1771 We don't need to worry about homogeneous floating-point or
1772 short-vector aggregates; their move is not affected by the
1773 padding direction determined here. Regardless of endianness,
1774 each element of such an aggregate is put in the least
1775 significant bits of a fp/simd register.
1777 Return !BYTES_BIG_ENDIAN if the least significant byte of the
1778 register has useful data, and return the opposite if the most
1779 significant byte does. */
1782 aarch64_pad_reg_upward (enum machine_mode mode
, const_tree type
,
1783 bool first ATTRIBUTE_UNUSED
)
1786 /* Small composite types are always padded upward. */
1787 if (BYTES_BIG_ENDIAN
&& aarch64_composite_type_p (type
, mode
))
1789 HOST_WIDE_INT size
= (type
? int_size_in_bytes (type
)
1790 : GET_MODE_SIZE (mode
));
1791 if (size
< 2 * UNITS_PER_WORD
)
1795 /* Otherwise, use the default padding. */
1796 return !BYTES_BIG_ENDIAN
;
1799 static enum machine_mode
1800 aarch64_libgcc_cmp_return_mode (void)
1806 aarch64_frame_pointer_required (void)
1808 /* If the function contains dynamic stack allocations, we need to
1809 use the frame pointer to access the static parts of the frame. */
1810 if (cfun
->calls_alloca
)
1813 /* In aarch64_override_options_after_change
1814 flag_omit_leaf_frame_pointer turns off the frame pointer by
1815 default. Turn it back on now if we've not got a leaf
1817 if (flag_omit_leaf_frame_pointer
1818 && (!crtl
->is_leaf
|| df_regs_ever_live_p (LR_REGNUM
)))
1824 /* Mark the registers that need to be saved by the callee and calculate
1825 the size of the callee-saved registers area and frame record (both FP
1826 and LR may be omitted). */
1828 aarch64_layout_frame (void)
1830 HOST_WIDE_INT offset
= 0;
1833 if (reload_completed
&& cfun
->machine
->frame
.laid_out
)
1836 #define SLOT_NOT_REQUIRED (-2)
1837 #define SLOT_REQUIRED (-1)
1839 cfun
->machine
->frame
.wb_candidate1
= FIRST_PSEUDO_REGISTER
;
1840 cfun
->machine
->frame
.wb_candidate2
= FIRST_PSEUDO_REGISTER
;
1842 /* First mark all the registers that really need to be saved... */
1843 for (regno
= R0_REGNUM
; regno
<= R30_REGNUM
; regno
++)
1844 cfun
->machine
->frame
.reg_offset
[regno
] = SLOT_NOT_REQUIRED
;
1846 for (regno
= V0_REGNUM
; regno
<= V31_REGNUM
; regno
++)
1847 cfun
->machine
->frame
.reg_offset
[regno
] = SLOT_NOT_REQUIRED
;
1849 /* ... that includes the eh data registers (if needed)... */
1850 if (crtl
->calls_eh_return
)
1851 for (regno
= 0; EH_RETURN_DATA_REGNO (regno
) != INVALID_REGNUM
; regno
++)
1852 cfun
->machine
->frame
.reg_offset
[EH_RETURN_DATA_REGNO (regno
)]
1855 /* ... and any callee saved register that dataflow says is live. */
1856 for (regno
= R0_REGNUM
; regno
<= R30_REGNUM
; regno
++)
1857 if (df_regs_ever_live_p (regno
)
1858 && !call_used_regs
[regno
])
1859 cfun
->machine
->frame
.reg_offset
[regno
] = SLOT_REQUIRED
;
1861 for (regno
= V0_REGNUM
; regno
<= V31_REGNUM
; regno
++)
1862 if (df_regs_ever_live_p (regno
)
1863 && !call_used_regs
[regno
])
1864 cfun
->machine
->frame
.reg_offset
[regno
] = SLOT_REQUIRED
;
1866 if (frame_pointer_needed
)
1868 /* FP and LR are placed in the linkage record. */
1869 cfun
->machine
->frame
.reg_offset
[R29_REGNUM
] = 0;
1870 cfun
->machine
->frame
.wb_candidate1
= R29_REGNUM
;
1871 cfun
->machine
->frame
.reg_offset
[R30_REGNUM
] = UNITS_PER_WORD
;
1872 cfun
->machine
->frame
.wb_candidate2
= R30_REGNUM
;
1873 cfun
->machine
->frame
.hardfp_offset
= 2 * UNITS_PER_WORD
;
1874 offset
+= 2 * UNITS_PER_WORD
;
1877 /* Now assign stack slots for them. */
1878 for (regno
= R0_REGNUM
; regno
<= R30_REGNUM
; regno
++)
1879 if (cfun
->machine
->frame
.reg_offset
[regno
] == SLOT_REQUIRED
)
1881 cfun
->machine
->frame
.reg_offset
[regno
] = offset
;
1882 if (cfun
->machine
->frame
.wb_candidate1
== FIRST_PSEUDO_REGISTER
)
1883 cfun
->machine
->frame
.wb_candidate1
= regno
;
1884 else if (cfun
->machine
->frame
.wb_candidate2
== FIRST_PSEUDO_REGISTER
)
1885 cfun
->machine
->frame
.wb_candidate2
= regno
;
1886 offset
+= UNITS_PER_WORD
;
1889 for (regno
= V0_REGNUM
; regno
<= V31_REGNUM
; regno
++)
1890 if (cfun
->machine
->frame
.reg_offset
[regno
] == SLOT_REQUIRED
)
1892 cfun
->machine
->frame
.reg_offset
[regno
] = offset
;
1893 if (cfun
->machine
->frame
.wb_candidate1
== FIRST_PSEUDO_REGISTER
)
1894 cfun
->machine
->frame
.wb_candidate1
= regno
;
1895 else if (cfun
->machine
->frame
.wb_candidate2
== FIRST_PSEUDO_REGISTER
1896 && cfun
->machine
->frame
.wb_candidate1
>= V0_REGNUM
)
1897 cfun
->machine
->frame
.wb_candidate2
= regno
;
1898 offset
+= UNITS_PER_WORD
;
1901 cfun
->machine
->frame
.padding0
=
1902 (AARCH64_ROUND_UP (offset
, STACK_BOUNDARY
/ BITS_PER_UNIT
) - offset
);
1903 offset
= AARCH64_ROUND_UP (offset
, STACK_BOUNDARY
/ BITS_PER_UNIT
);
1905 cfun
->machine
->frame
.saved_regs_size
= offset
;
1907 cfun
->machine
->frame
.hard_fp_offset
1908 = AARCH64_ROUND_UP (cfun
->machine
->frame
.saved_varargs_size
1910 + cfun
->machine
->frame
.saved_regs_size
,
1911 STACK_BOUNDARY
/ BITS_PER_UNIT
);
1913 cfun
->machine
->frame
.frame_size
1914 = AARCH64_ROUND_UP (cfun
->machine
->frame
.hard_fp_offset
1915 + crtl
->outgoing_args_size
,
1916 STACK_BOUNDARY
/ BITS_PER_UNIT
);
1918 cfun
->machine
->frame
.laid_out
= true;
1921 /* Make the last instruction frame-related and note that it performs
1922 the operation described by FRAME_PATTERN. */
1925 aarch64_set_frame_expr (rtx frame_pattern
)
1929 insn
= get_last_insn ();
1930 RTX_FRAME_RELATED_P (insn
) = 1;
1931 RTX_FRAME_RELATED_P (frame_pattern
) = 1;
1932 REG_NOTES (insn
) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
1938 aarch64_register_saved_on_entry (int regno
)
1940 return cfun
->machine
->frame
.reg_offset
[regno
] >= 0;
1944 aarch64_next_callee_save (unsigned regno
, unsigned limit
)
1946 while (regno
<= limit
&& !aarch64_register_saved_on_entry (regno
))
1952 aarch64_pushwb_single_reg (enum machine_mode mode
, unsigned regno
,
1953 HOST_WIDE_INT adjustment
)
1955 rtx base_rtx
= stack_pointer_rtx
;
1958 reg
= gen_rtx_REG (mode
, regno
);
1959 mem
= gen_rtx_PRE_MODIFY (Pmode
, base_rtx
,
1960 plus_constant (Pmode
, base_rtx
, -adjustment
));
1961 mem
= gen_rtx_MEM (mode
, mem
);
1963 insn
= emit_move_insn (mem
, reg
);
1964 RTX_FRAME_RELATED_P (insn
) = 1;
1968 aarch64_popwb_single_reg (enum machine_mode mode
, unsigned regno
,
1969 HOST_WIDE_INT adjustment
)
1971 rtx base_rtx
= stack_pointer_rtx
;
1974 reg
= gen_rtx_REG (mode
, regno
);
1975 mem
= gen_rtx_POST_MODIFY (Pmode
, base_rtx
,
1976 plus_constant (Pmode
, base_rtx
, adjustment
));
1977 mem
= gen_rtx_MEM (mode
, mem
);
1979 insn
= emit_move_insn (reg
, mem
);
1980 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
1981 RTX_FRAME_RELATED_P (insn
) = 1;
1985 aarch64_gen_storewb_pair (enum machine_mode mode
, rtx base
, rtx reg
, rtx reg2
,
1986 HOST_WIDE_INT adjustment
)
1991 return gen_storewb_pairdi_di (base
, base
, reg
, reg2
,
1992 GEN_INT (-adjustment
),
1993 GEN_INT (UNITS_PER_WORD
- adjustment
));
1995 return gen_storewb_pairdf_di (base
, base
, reg
, reg2
,
1996 GEN_INT (-adjustment
),
1997 GEN_INT (UNITS_PER_WORD
- adjustment
));
2004 aarch64_pushwb_pair_reg (enum machine_mode mode
, unsigned regno1
,
2005 unsigned regno2
, HOST_WIDE_INT adjustment
)
2008 rtx reg1
= gen_rtx_REG (mode
, regno1
);
2009 rtx reg2
= gen_rtx_REG (mode
, regno2
);
2011 insn
= emit_insn (aarch64_gen_storewb_pair (mode
, stack_pointer_rtx
, reg1
,
2013 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 2)) = 1;
2015 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 1)) = 1;
2016 RTX_FRAME_RELATED_P (insn
) = 1;
2020 aarch64_gen_loadwb_pair (enum machine_mode mode
, rtx base
, rtx reg
, rtx reg2
,
2021 HOST_WIDE_INT adjustment
)
2026 return gen_loadwb_pairdi_di (base
, base
, reg
, reg2
, GEN_INT (adjustment
),
2027 GEN_INT (UNITS_PER_WORD
));
2029 return gen_loadwb_pairdf_di (base
, base
, reg
, reg2
, GEN_INT (adjustment
),
2030 GEN_INT (UNITS_PER_WORD
));
2037 aarch64_popwb_pair_reg (enum machine_mode mode
, unsigned regno1
,
2038 unsigned regno2
, HOST_WIDE_INT adjustment
, rtx cfa
)
2041 rtx reg1
= gen_rtx_REG (mode
, regno1
);
2042 rtx reg2
= gen_rtx_REG (mode
, regno2
);
2044 insn
= emit_insn (aarch64_gen_loadwb_pair (mode
, stack_pointer_rtx
, reg1
,
2046 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 2)) = 1;
2047 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 1)) = 1;
2048 RTX_FRAME_RELATED_P (insn
) = 1;
2051 add_reg_note (insn
, REG_CFA_ADJUST_CFA
,
2052 (gen_rtx_SET (Pmode
, stack_pointer_rtx
,
2053 plus_constant (Pmode
, cfa
, adjustment
))));
2055 add_reg_note (insn
, REG_CFA_RESTORE
, reg1
);
2056 add_reg_note (insn
, REG_CFA_RESTORE
, reg2
);
2060 aarch64_gen_store_pair (enum machine_mode mode
, rtx mem1
, rtx reg1
, rtx mem2
,
2066 return gen_store_pairdi (mem1
, reg1
, mem2
, reg2
);
2069 return gen_store_pairdf (mem1
, reg1
, mem2
, reg2
);
2077 aarch64_gen_load_pair (enum machine_mode mode
, rtx reg1
, rtx mem1
, rtx reg2
,
2083 return gen_load_pairdi (reg1
, mem1
, reg2
, mem2
);
2086 return gen_load_pairdf (reg1
, mem1
, reg2
, mem2
);
2095 aarch64_save_callee_saves (enum machine_mode mode
, HOST_WIDE_INT start_offset
,
2096 unsigned start
, unsigned limit
, bool skip_wb
)
2099 rtx (*gen_mem_ref
) (enum machine_mode
, rtx
) = (frame_pointer_needed
2100 ? gen_frame_mem
: gen_rtx_MEM
);
2104 for (regno
= aarch64_next_callee_save (start
, limit
);
2106 regno
= aarch64_next_callee_save (regno
+ 1, limit
))
2109 HOST_WIDE_INT offset
;
2112 && (regno
== cfun
->machine
->frame
.wb_candidate1
2113 || regno
== cfun
->machine
->frame
.wb_candidate2
))
2116 reg
= gen_rtx_REG (mode
, regno
);
2117 offset
= start_offset
+ cfun
->machine
->frame
.reg_offset
[regno
];
2118 mem
= gen_mem_ref (mode
, plus_constant (Pmode
, stack_pointer_rtx
,
2121 regno2
= aarch64_next_callee_save (regno
+ 1, limit
);
2124 && ((cfun
->machine
->frame
.reg_offset
[regno
] + UNITS_PER_WORD
)
2125 == cfun
->machine
->frame
.reg_offset
[regno2
]))
2128 rtx reg2
= gen_rtx_REG (mode
, regno2
);
2131 offset
= start_offset
+ cfun
->machine
->frame
.reg_offset
[regno2
];
2132 mem2
= gen_mem_ref (mode
, plus_constant (Pmode
, stack_pointer_rtx
,
2134 insn
= emit_insn (aarch64_gen_store_pair (mode
, mem
, reg
, mem2
,
2137 /* The first part of a frame-related parallel insn is
2138 always assumed to be relevant to the frame
2139 calculations; subsequent parts, are only
2140 frame-related if explicitly marked. */
2141 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 1)) = 1;
2145 insn
= emit_move_insn (mem
, reg
);
2147 RTX_FRAME_RELATED_P (insn
) = 1;
2152 aarch64_restore_callee_saves (enum machine_mode mode
,
2153 HOST_WIDE_INT start_offset
, unsigned start
,
2154 unsigned limit
, bool skip_wb
)
2157 rtx base_rtx
= stack_pointer_rtx
;
2158 rtx (*gen_mem_ref
) (enum machine_mode
, rtx
) = (frame_pointer_needed
2159 ? gen_frame_mem
: gen_rtx_MEM
);
2162 HOST_WIDE_INT offset
;
2164 for (regno
= aarch64_next_callee_save (start
, limit
);
2166 regno
= aarch64_next_callee_save (regno
+ 1, limit
))
2171 && (regno
== cfun
->machine
->frame
.wb_candidate1
2172 || regno
== cfun
->machine
->frame
.wb_candidate2
))
2175 reg
= gen_rtx_REG (mode
, regno
);
2176 offset
= start_offset
+ cfun
->machine
->frame
.reg_offset
[regno
];
2177 mem
= gen_mem_ref (mode
, plus_constant (Pmode
, base_rtx
, offset
));
2179 regno2
= aarch64_next_callee_save (regno
+ 1, limit
);
2182 && ((cfun
->machine
->frame
.reg_offset
[regno
] + UNITS_PER_WORD
)
2183 == cfun
->machine
->frame
.reg_offset
[regno2
]))
2185 rtx reg2
= gen_rtx_REG (mode
, regno2
);
2188 offset
= start_offset
+ cfun
->machine
->frame
.reg_offset
[regno2
];
2189 mem2
= gen_mem_ref (mode
, plus_constant (Pmode
, base_rtx
, offset
));
2190 insn
= emit_insn (aarch64_gen_load_pair (mode
, reg
, mem
, reg2
,
2192 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
2193 add_reg_note (insn
, REG_CFA_RESTORE
, reg2
);
2195 /* The first part of a frame-related parallel insn is
2196 always assumed to be relevant to the frame
2197 calculations; subsequent parts, are only
2198 frame-related if explicitly marked. */
2199 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn
), 0, 1)) = 1;
2204 insn
= emit_move_insn (reg
, mem
);
2205 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
2208 RTX_FRAME_RELATED_P (insn
) = 1;
2212 /* AArch64 stack frames generated by this compiler look like:
2214 +-------------------------------+
2216 | incoming stack arguments |
2218 +-------------------------------+
2219 | | <-- incoming stack pointer (aligned)
2220 | callee-allocated save area |
2221 | for register varargs |
2223 +-------------------------------+
2224 | local variables | <-- frame_pointer_rtx
2226 +-------------------------------+
2228 +-------------------------------+ |
2229 | callee-saved registers | | frame.saved_regs_size
2230 +-------------------------------+ |
2232 +-------------------------------+ |
2233 | FP' | / <- hard_frame_pointer_rtx (aligned)
2234 +-------------------------------+
2235 | dynamic allocation |
2236 +-------------------------------+
2238 +-------------------------------+
2239 | outgoing stack arguments | <-- arg_pointer
2241 +-------------------------------+
2242 | | <-- stack_pointer_rtx (aligned)
2244 Dynamic stack allocations via alloca() decrease stack_pointer_rtx
2245 but leave frame_pointer_rtx and hard_frame_pointer_rtx
2248 /* Generate the prologue instructions for entry into a function.
2249 Establish the stack frame by decreasing the stack pointer with a
2250 properly calculated size and, if necessary, create a frame record
2251 filled with the values of LR and previous frame pointer. The
2252 current FP is also set up if it is in use. */
2255 aarch64_expand_prologue (void)
2257 /* sub sp, sp, #<frame_size>
2258 stp {fp, lr}, [sp, #<frame_size> - 16]
2259 add fp, sp, #<frame_size> - hardfp_offset
2260 stp {cs_reg}, [fp, #-16] etc.
2262 sub sp, sp, <final_adjustment_if_any> */
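/* Illustrative sketch (added here, not part of the original source): for a
   small function where aarch64_layout_frame computes frame_size == 64 and
   hard_fp_offset == 16, the sequence above could expand to roughly

       sub  sp, sp, #64
       stp  x29, x30, [sp, #48]
       add  x29, sp, #48

   The exact offsets, the callee-saved stores and the final adjustment all
   depend on the computed frame layout, so treat this only as an example.  */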
2264 HOST_WIDE_INT frame_size
, offset
;
2265 HOST_WIDE_INT fp_offset
; /* Offset from hard FP to SP. */
2268 aarch64_layout_frame ();
2270 if (flag_stack_usage_info
)
2271 current_function_static_stack_size
= cfun
->machine
->frame
.frame_size
;
2273 frame_size
= cfun
->machine
->frame
.frame_size
;
2274 offset
= cfun
->machine
->frame
.frame_size
;
2276 fp_offset
= cfun
->machine
->frame
.frame_size
2277 - cfun
->machine
->frame
.hard_fp_offset
;
2279 /* Store pairs and load pairs have a range of only -512 to 504. */
2282 /* When the frame has a large size, an initial decrease is done on
2283 the stack pointer to jump over the callee-allocated save area for
2284 register varargs, the local variable area and/or the callee-saved
2285 register area. This will allow the pre-index write-back
2286 store pair instructions to be used for setting up the stack frame efficiently. */
2288 offset
= cfun
->machine
->frame
.hard_fp_offset
;
2290 offset
= cfun
->machine
->frame
.saved_regs_size
;
2292 frame_size
-= (offset
+ crtl
->outgoing_args_size
);
2295 if (frame_size
>= 0x1000000)
2297 rtx op0
= gen_rtx_REG (Pmode
, IP0_REGNUM
);
2298 emit_move_insn (op0
, GEN_INT (-frame_size
));
2299 emit_insn (gen_add2_insn (stack_pointer_rtx
, op0
));
2300 aarch64_set_frame_expr (gen_rtx_SET
2301 (Pmode
, stack_pointer_rtx
,
2302 plus_constant (Pmode
,
2306 else if (frame_size
> 0)
2308 if ((frame_size
& 0xfff) != frame_size
)
2310 insn
= emit_insn (gen_add2_insn
2312 GEN_INT (-(frame_size
2313 & ~(HOST_WIDE_INT
)0xfff))));
2314 RTX_FRAME_RELATED_P (insn
) = 1;
2316 if ((frame_size
& 0xfff) != 0)
2318 insn
= emit_insn (gen_add2_insn
2320 GEN_INT (-(frame_size
2321 & (HOST_WIDE_INT
)0xfff))));
2322 RTX_FRAME_RELATED_P (insn
) = 1;
2331 bool skip_wb
= false;
2333 if (frame_pointer_needed
)
2339 insn
= emit_insn (gen_add2_insn (stack_pointer_rtx
,
2340 GEN_INT (-offset
)));
2341 RTX_FRAME_RELATED_P (insn
) = 1;
2342 aarch64_set_frame_expr (gen_rtx_SET
2343 (Pmode
, stack_pointer_rtx
,
2344 gen_rtx_MINUS (Pmode
, stack_pointer_rtx
,
2345 GEN_INT (offset
))));
2347 aarch64_save_callee_saves (DImode
, fp_offset
, R29_REGNUM
,
2351 aarch64_pushwb_pair_reg (DImode
, R29_REGNUM
, R30_REGNUM
, offset
);
2353 /* Set up frame pointer to point to the location of the
2354 previous frame pointer on the stack. */
2355 insn
= emit_insn (gen_add3_insn (hard_frame_pointer_rtx
,
2357 GEN_INT (fp_offset
)));
2358 aarch64_set_frame_expr (gen_rtx_SET
2359 (Pmode
, hard_frame_pointer_rtx
,
2360 plus_constant (Pmode
,
2363 RTX_FRAME_RELATED_P (insn
) = 1;
2364 insn
= emit_insn (gen_stack_tie (stack_pointer_rtx
,
2365 hard_frame_pointer_rtx
));
2369 unsigned reg1
= cfun
->machine
->frame
.wb_candidate1
;
2370 unsigned reg2
= cfun
->machine
->frame
.wb_candidate2
;
2373 || reg1
== FIRST_PSEUDO_REGISTER
2374 || (reg2
== FIRST_PSEUDO_REGISTER
2377 insn
= emit_insn (gen_add2_insn (stack_pointer_rtx
,
2378 GEN_INT (-offset
)));
2379 RTX_FRAME_RELATED_P (insn
) = 1;
2383 enum machine_mode mode1
= (reg1
<= R30_REGNUM
) ? DImode
: DFmode
;
2387 if (reg2
== FIRST_PSEUDO_REGISTER
)
2388 aarch64_pushwb_single_reg (mode1
, reg1
, offset
);
2390 aarch64_pushwb_pair_reg (mode1
, reg1
, reg2
, offset
);
2394 aarch64_save_callee_saves (DImode
, fp_offset
, R0_REGNUM
, R30_REGNUM
,
2396 aarch64_save_callee_saves (DFmode
, fp_offset
, V0_REGNUM
, V31_REGNUM
,
2400 /* when offset >= 512,
2401 sub sp, sp, #<outgoing_args_size> */
2402 if (frame_size
> -1)
2404 if (crtl
->outgoing_args_size
> 0)
2406 insn
= emit_insn (gen_add2_insn
2408 GEN_INT (- crtl
->outgoing_args_size
)));
2409 RTX_FRAME_RELATED_P (insn
) = 1;
2414 /* Generate the epilogue instructions for returning from a function. */
2416 aarch64_expand_epilogue (bool for_sibcall
)
2418 HOST_WIDE_INT frame_size
, offset
;
2419 HOST_WIDE_INT fp_offset
;
2423 aarch64_layout_frame ();
2425 offset
= frame_size
= cfun
->machine
->frame
.frame_size
;
2426 fp_offset
= cfun
->machine
->frame
.frame_size
2427 - cfun
->machine
->frame
.hard_fp_offset
;
2429 cfa_reg
= frame_pointer_needed
? hard_frame_pointer_rtx
: stack_pointer_rtx
;
2431 /* Store pairs and load pairs have a range of only -512 to 504. */
2434 offset
= cfun
->machine
->frame
.hard_fp_offset
;
2436 offset
= cfun
->machine
->frame
.saved_regs_size
;
2438 frame_size
-= (offset
+ crtl
->outgoing_args_size
);
2440 if (!frame_pointer_needed
&& crtl
->outgoing_args_size
> 0)
2442 insn
= emit_insn (gen_add2_insn
2444 GEN_INT (crtl
->outgoing_args_size
)));
2445 RTX_FRAME_RELATED_P (insn
) = 1;
2451 /* If there were outgoing arguments or we've done dynamic stack
2452 allocation, then restore the stack pointer from the frame
2453 pointer. This is at most one insn and more efficient than using
2454 GCC's internal mechanism. */
2455 if (frame_pointer_needed
2456 && (crtl
->outgoing_args_size
|| cfun
->calls_alloca
))
2458 insn
= emit_insn (gen_add3_insn (stack_pointer_rtx
,
2459 hard_frame_pointer_rtx
,
2461 offset
= offset
- fp_offset
;
2462 RTX_FRAME_RELATED_P (insn
) = 1;
2463 /* As SP is set to (FP - fp_offset), according to the rules in
2464 dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
2465 from the value of SP from now on. */
2466 cfa_reg
= stack_pointer_rtx
;
2471 unsigned reg1
= cfun
->machine
->frame
.wb_candidate1
;
2472 unsigned reg2
= cfun
->machine
->frame
.wb_candidate2
;
2473 bool skip_wb
= true;
2475 if (frame_pointer_needed
)
2478 || reg1
== FIRST_PSEUDO_REGISTER
2479 || (reg2
== FIRST_PSEUDO_REGISTER
2483 aarch64_restore_callee_saves (DImode
, fp_offset
, R0_REGNUM
, R30_REGNUM
,
2485 aarch64_restore_callee_saves (DFmode
, fp_offset
, V0_REGNUM
, V31_REGNUM
,
2490 enum machine_mode mode1
= (reg1
<= R30_REGNUM
) ? DImode
: DFmode
;
2492 if (reg2
== FIRST_PSEUDO_REGISTER
)
2493 aarch64_popwb_single_reg (mode1
, reg1
, offset
);
2496 if (reg1
!= HARD_FRAME_POINTER_REGNUM
)
2499 aarch64_popwb_pair_reg (mode1
, reg1
, reg2
, offset
, cfa_reg
);
2504 insn
= emit_insn (gen_add2_insn (stack_pointer_rtx
,
2506 RTX_FRAME_RELATED_P (insn
) = 1;
2510 /* Stack adjustment for exception handler. */
2511 if (crtl
->calls_eh_return
)
2513 /* We need to unwind the stack by the offset computed by
2514 EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
2515 based on SP. Ideally we would update the SP and define the
2516 CFA along the lines of:
2518 SP = SP + EH_RETURN_STACKADJ_RTX
2519 (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
2521 However the dwarf emitter only understands a constant register offset.
2524 The solution chosen here is to use the otherwise unused IP0
2525 as a temporary register to hold the current SP value. The
2526 CFA is described using IP0 then SP is modified. */
2528 rtx ip0
= gen_rtx_REG (DImode
, IP0_REGNUM
);
2530 insn
= emit_move_insn (ip0
, stack_pointer_rtx
);
2531 add_reg_note (insn
, REG_CFA_DEF_CFA
, ip0
);
2532 RTX_FRAME_RELATED_P (insn
) = 1;
2534 emit_insn (gen_add2_insn (stack_pointer_rtx
, EH_RETURN_STACKADJ_RTX
));
2536 /* Ensure the assignment to IP0 does not get optimized away. */
2540 if (frame_size
> -1)
2542 if (frame_size
>= 0x1000000)
2544 rtx op0
= gen_rtx_REG (Pmode
, IP0_REGNUM
);
2545 emit_move_insn (op0
, GEN_INT (frame_size
));
2546 emit_insn (gen_add2_insn (stack_pointer_rtx
, op0
));
2547 aarch64_set_frame_expr (gen_rtx_SET
2548 (Pmode
, stack_pointer_rtx
,
2549 plus_constant (Pmode
,
2553 else if (frame_size
> 0)
2555 if ((frame_size
& 0xfff) != 0)
2557 insn
= emit_insn (gen_add2_insn
2559 GEN_INT ((frame_size
2560 & (HOST_WIDE_INT
) 0xfff))));
2561 RTX_FRAME_RELATED_P (insn
) = 1;
2563 if ((frame_size
& 0xfff) != frame_size
)
2565 insn
= emit_insn (gen_add2_insn
2567 GEN_INT ((frame_size
2568 & ~ (HOST_WIDE_INT
) 0xfff))));
2569 RTX_FRAME_RELATED_P (insn
) = 1;
2573 aarch64_set_frame_expr (gen_rtx_SET (Pmode
, stack_pointer_rtx
,
2574 plus_constant (Pmode
,
2579 emit_use (gen_rtx_REG (DImode
, LR_REGNUM
));
2581 emit_jump_insn (ret_rtx
);
2584 /* Return the place to copy the exception unwinding return address to.
2585 This will probably be a stack slot, but could (in theory) be the
2586 return register. */
2588 aarch64_final_eh_return_addr (void)
2590 HOST_WIDE_INT fp_offset
;
2592 aarch64_layout_frame ();
2594 fp_offset
= cfun
->machine
->frame
.frame_size
2595 - cfun
->machine
->frame
.hard_fp_offset
;
2597 if (cfun
->machine
->frame
.reg_offset
[LR_REGNUM
] < 0)
2598 return gen_rtx_REG (DImode
, LR_REGNUM
);
2600 /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
2601 result in a store to save LR introduced by builtin_eh_return () being
2602 incorrectly deleted because the alias is not detected.
2603 So in the calculation of the address to copy the exception unwinding
2604 return address to, we consider two cases.
2605 If FP is needed and the fp_offset is 0, it means that SP = FP and hence
2606 we return a SP-relative location since all the addresses are SP-relative
2607 in this case. This prevents the store from being optimized away.
2608 If the fp_offset is not 0, then the addresses will be FP-relative and
2609 therefore we return an FP-relative location. */
2611 if (frame_pointer_needed
)
2614 return gen_frame_mem (DImode
,
2615 plus_constant (Pmode
, hard_frame_pointer_rtx
, UNITS_PER_WORD
));
2617 return gen_frame_mem (DImode
,
2618 plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
));
2621 /* If FP is not needed, we calculate the location of LR, which would be
2622 at the top of the saved registers block. */
2624 return gen_frame_mem (DImode
,
2625 plus_constant (Pmode
,
2628 + cfun
->machine
->frame
.saved_regs_size
2629 - 2 * UNITS_PER_WORD
));
2632 /* Possibly output code to build up a constant in a register. For
2633 the benefit of the costs infrastructure, returns the number of
2634 instructions which would be emitted. GENERATE inhibits or
2635 enables code generation. */
2638 aarch64_build_constant (int regnum
, HOST_WIDE_INT val
, bool generate
)
2642 if (aarch64_bitmask_imm (val
, DImode
))
2645 emit_move_insn (gen_rtx_REG (Pmode
, regnum
), GEN_INT (val
));
2653 HOST_WIDE_INT valp
= val
>> 16;
2657 for (i
= 16; i
< 64; i
+= 16)
2659 valm
= (valp
& 0xffff);
2670 /* zcount contains the number of additional MOVK instructions
2671 required if the constant is built up with an initial MOVZ instruction,
2672 while ncount is the number of MOVK instructions required if starting
2673 with a MOVN instruction. Choose the sequence that needs the
2674 smaller number of instructions, preferring MOVZ instructions when the two counts are the same. */
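/* Worked example (added for illustration, not in the original source):
   for val == 0xffffffffffff1234, three of the four 16-bit chunks are
   0xffff, so zcount is 3 while ncount is 0; starting with
   MOVN x<regnum>, #0xedcb synthesizes the value in a single instruction,
   whereas a MOVZ-based sequence would need three extra MOVKs.  */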
2676 if (ncount
< zcount
)
2679 emit_move_insn (gen_rtx_REG (Pmode
, regnum
),
2680 GEN_INT (val
| ~(HOST_WIDE_INT
) 0xffff));
2687 emit_move_insn (gen_rtx_REG (Pmode
, regnum
),
2688 GEN_INT (val
& 0xffff));
2695 for (i
= 16; i
< 64; i
+= 16)
2697 if ((val
& 0xffff) != tval
)
2700 emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode
, regnum
),
2702 GEN_INT (val
& 0xffff)));
2712 aarch64_add_constant (int regnum
, int scratchreg
, HOST_WIDE_INT delta
)
2714 HOST_WIDE_INT mdelta
= delta
;
2715 rtx this_rtx
= gen_rtx_REG (Pmode
, regnum
);
2716 rtx scratch_rtx
= gen_rtx_REG (Pmode
, scratchreg
);
2721 if (mdelta
>= 4096 * 4096)
2723 (void) aarch64_build_constant (scratchreg
, delta
, true);
2724 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, scratch_rtx
));
2726 else if (mdelta
> 0)
2730 emit_insn (gen_rtx_SET (Pmode
, scratch_rtx
, GEN_INT (mdelta
/ 4096)));
2731 rtx shift
= gen_rtx_ASHIFT (Pmode
, scratch_rtx
, GEN_INT (12));
2733 emit_insn (gen_rtx_SET (Pmode
, this_rtx
,
2734 gen_rtx_MINUS (Pmode
, this_rtx
, shift
)));
2736 emit_insn (gen_rtx_SET (Pmode
, this_rtx
,
2737 gen_rtx_PLUS (Pmode
, this_rtx
, shift
)));
2739 if (mdelta
% 4096 != 0)
2741 scratch_rtx
= GEN_INT ((delta
< 0 ? -1 : 1) * (mdelta
% 4096));
2742 emit_insn (gen_rtx_SET (Pmode
, this_rtx
,
2743 gen_rtx_PLUS (Pmode
, this_rtx
, scratch_rtx
)));
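/* Worked example (illustration only, not from the original source):
   aarch64_add_constant with delta == 13000 loads 13000 / 4096 == 3 into
   the scratch register, adds it shifted left by 12 (i.e. 12288) to the
   destination, then adds the remaining 13000 % 4096 == 712 as an
   immediate -- conceptually x<regnum> += (3 << 12) + 712.  */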
2748 /* Output code to add DELTA to the first argument, and then jump
2749 to FUNCTION. Used for C++ multiple inheritance. */
2751 aarch64_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
2752 HOST_WIDE_INT delta
,
2753 HOST_WIDE_INT vcall_offset
,
2756 /* The this pointer is always in x0. Note that this differs from
2757 Arm where the this pointer may be bumped to r1 if r0 is required
2758 to return a pointer to an aggregate. On AArch64 a result value
2759 pointer will be in x8. */
2760 int this_regno
= R0_REGNUM
;
2761 rtx this_rtx
, temp0
, temp1
, addr
, funexp
;
2764 reload_completed
= 1;
2765 emit_note (NOTE_INSN_PROLOGUE_END
);
2767 if (vcall_offset
== 0)
2768 aarch64_add_constant (this_regno
, IP1_REGNUM
, delta
);
2771 gcc_assert ((vcall_offset
& (POINTER_BYTES
- 1)) == 0);
2773 this_rtx
= gen_rtx_REG (Pmode
, this_regno
);
2774 temp0
= gen_rtx_REG (Pmode
, IP0_REGNUM
);
2775 temp1
= gen_rtx_REG (Pmode
, IP1_REGNUM
);
2780 if (delta
>= -256 && delta
< 256)
2781 addr
= gen_rtx_PRE_MODIFY (Pmode
, this_rtx
,
2782 plus_constant (Pmode
, this_rtx
, delta
));
2784 aarch64_add_constant (this_regno
, IP1_REGNUM
, delta
);
2787 if (Pmode
== ptr_mode
)
2788 aarch64_emit_move (temp0
, gen_rtx_MEM (ptr_mode
, addr
));
2790 aarch64_emit_move (temp0
,
2791 gen_rtx_ZERO_EXTEND (Pmode
,
2792 gen_rtx_MEM (ptr_mode
, addr
)));
2794 if (vcall_offset
>= -256 && vcall_offset
< 4096 * POINTER_BYTES
)
2795 addr
= plus_constant (Pmode
, temp0
, vcall_offset
);
2798 (void) aarch64_build_constant (IP1_REGNUM
, vcall_offset
, true);
2799 addr
= gen_rtx_PLUS (Pmode
, temp0
, temp1
);
2802 if (Pmode
== ptr_mode
)
2803 aarch64_emit_move (temp1
, gen_rtx_MEM (ptr_mode
,addr
));
2805 aarch64_emit_move (temp1
,
2806 gen_rtx_SIGN_EXTEND (Pmode
,
2807 gen_rtx_MEM (ptr_mode
, addr
)));
2809 emit_insn (gen_add2_insn (this_rtx
, temp1
));
2812 /* Generate a tail call to the target function. */
2813 if (!TREE_USED (function
))
2815 assemble_external (function
);
2816 TREE_USED (function
) = 1;
2818 funexp
= XEXP (DECL_RTL (function
), 0);
2819 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
2820 insn
= emit_call_insn (gen_sibcall (funexp
, const0_rtx
, NULL_RTX
));
2821 SIBLING_CALL_P (insn
) = 1;
2823 insn
= get_insns ();
2824 shorten_branches (insn
);
2825 final_start_function (insn
, file
, 1);
2826 final (insn
, file
, 1);
2827 final_end_function ();
2829 /* Stop pretending to be a post-reload pass. */
2830 reload_completed
= 0;
2834 aarch64_tls_operand_p_1 (rtx
*x
, void *data ATTRIBUTE_UNUSED
)
2836 if (GET_CODE (*x
) == SYMBOL_REF
)
2837 return SYMBOL_REF_TLS_MODEL (*x
) != 0;
2839 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2840 TLS offsets, not real symbol references. */
2841 if (GET_CODE (*x
) == UNSPEC
2842 && XINT (*x
, 1) == UNSPEC_TLS
)
2849 aarch64_tls_referenced_p (rtx x
)
2851 if (!TARGET_HAVE_TLS
)
2854 return for_each_rtx (&x
, aarch64_tls_operand_p_1
, NULL
);
2859 aarch64_bitmasks_cmp (const void *i1
, const void *i2
)
2861 const unsigned HOST_WIDE_INT
*imm1
= (const unsigned HOST_WIDE_INT
*) i1
;
2862 const unsigned HOST_WIDE_INT
*imm2
= (const unsigned HOST_WIDE_INT
*) i2
;
2873 aarch64_build_bitmask_table (void)
2875 unsigned HOST_WIDE_INT mask
, imm
;
2876 unsigned int log_e
, e
, s
, r
;
2877 unsigned int nimms
= 0;
2879 for (log_e
= 1; log_e
<= 6; log_e
++)
2883 mask
= ~(HOST_WIDE_INT
) 0;
2885 mask
= ((HOST_WIDE_INT
) 1 << e
) - 1;
2886 for (s
= 1; s
< e
; s
++)
2888 for (r
= 0; r
< e
; r
++)
2890 /* set s consecutive bits to 1 (s < 64) */
2891 imm
= ((unsigned HOST_WIDE_INT
)1 << s
) - 1;
2892 /* rotate right by r */
2894 imm
= ((imm
>> r
) | (imm
<< (e
- r
))) & mask
;
2895 /* replicate the constant depending on SIMD size */
2897 case 1: imm
|= (imm
<< 2);
2898 case 2: imm
|= (imm
<< 4);
2899 case 3: imm
|= (imm
<< 8);
2900 case 4: imm
|= (imm
<< 16);
2901 case 5: imm
|= (imm
<< 32);
2907 gcc_assert (nimms
< AARCH64_NUM_BITMASKS
);
2908 aarch64_bitmasks
[nimms
++] = imm
;
2913 gcc_assert (nimms
== AARCH64_NUM_BITMASKS
);
2914 qsort (aarch64_bitmasks
, nimms
, sizeof (aarch64_bitmasks
[0]),
2915 aarch64_bitmasks_cmp
);
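/* Example of the encodable values (added for illustration): a logical
   immediate is a run of consecutive ones, rotated and then replicated
   across equal-sized elements, so 0x00ff00ff00ff00ff (eight ones
   replicated every 16 bits) and 0x5555555555555555 (a single one
   replicated every 2 bits) end up in the table, while 0 and ~0 are not
   encodable.  */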
2919 /* Return true if val can be encoded as a 12-bit unsigned immediate with
2920 a left shift of 0 or 12 bits. */
2922 aarch64_uimm12_shift (HOST_WIDE_INT val
)
2924 return ((val
& (((HOST_WIDE_INT
) 0xfff) << 0)) == val
2925 || (val
& (((HOST_WIDE_INT
) 0xfff) << 12)) == val
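/* For illustration (not part of the original source): 0xabc and 0xabc000
   both satisfy this test (shift of 0 and 12 respectively), while 0x1001
   does not, because its set bits straddle the two 12-bit fields.  */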
2930 /* Return true if val is an immediate that can be loaded into a
2931 register by a MOVZ instruction. */
2933 aarch64_movw_imm (HOST_WIDE_INT val
, enum machine_mode mode
)
2935 if (GET_MODE_SIZE (mode
) > 4)
2937 if ((val
& (((HOST_WIDE_INT
) 0xffff) << 32)) == val
2938 || (val
& (((HOST_WIDE_INT
) 0xffff) << 48)) == val
)
2943 /* Ignore sign extension. */
2944 val
&= (HOST_WIDE_INT
) 0xffffffff;
2946 return ((val
& (((HOST_WIDE_INT
) 0xffff) << 0)) == val
2947 || (val
& (((HOST_WIDE_INT
) 0xffff) << 16)) == val
);
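/* Illustrative values (not from the original source): 0x1234, 0x12340000
   and, for 64-bit modes, 0x1234000000000000 are all single MOVZ
   immediates, whereas 0x12345678 spans two 16-bit fields and is rejected
   here.  */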
2951 /* Return true if val is a valid bitmask immediate. */
2953 aarch64_bitmask_imm (HOST_WIDE_INT val
, enum machine_mode mode
)
2955 if (GET_MODE_SIZE (mode
) < 8)
2957 /* Replicate bit pattern. */
2958 val
&= (HOST_WIDE_INT
) 0xffffffff;
2961 return bsearch (&val
, aarch64_bitmasks
, AARCH64_NUM_BITMASKS
,
2962 sizeof (aarch64_bitmasks
[0]), aarch64_bitmasks_cmp
) != NULL
;
2966 /* Return true if val is an immediate that can be loaded into a
2967 register in a single instruction. */
2969 aarch64_move_imm (HOST_WIDE_INT val
, enum machine_mode mode
)
2971 if (aarch64_movw_imm (val
, mode
) || aarch64_movw_imm (~val
, mode
))
2973 return aarch64_bitmask_imm (val
, mode
);
2977 aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
2981 if (GET_CODE (x
) == HIGH
)
2984 split_const (x
, &base
, &offset
);
2985 if (GET_CODE (base
) == SYMBOL_REF
|| GET_CODE (base
) == LABEL_REF
)
2987 if (aarch64_classify_symbol (base
, SYMBOL_CONTEXT_ADR
)
2988 != SYMBOL_FORCE_TO_MEM
)
2991 /* Avoid generating a 64-bit relocation in ILP32; leave it
2992 to aarch64_expand_mov_immediate to handle it properly. */
2993 return mode
!= ptr_mode
;
2996 return aarch64_tls_referenced_p (x
);
2999 /* Return true if register REGNO is a valid index register.
3000 STRICT_P is true if REG_OK_STRICT is in effect. */
3003 aarch64_regno_ok_for_index_p (int regno
, bool strict_p
)
3005 if (!HARD_REGISTER_NUM_P (regno
))
3013 regno
= reg_renumber
[regno
];
3015 return GP_REGNUM_P (regno
);
3018 /* Return true if register REGNO is a valid base register for mode MODE.
3019 STRICT_P is true if REG_OK_STRICT is in effect. */
3022 aarch64_regno_ok_for_base_p (int regno
, bool strict_p
)
3024 if (!HARD_REGISTER_NUM_P (regno
))
3032 regno
= reg_renumber
[regno
];
3035 /* The fake registers will be eliminated to either the stack or
3036 hard frame pointer, both of which are usually valid base registers.
3037 Reload deals with the cases where the eliminated form isn't valid. */
3038 return (GP_REGNUM_P (regno
)
3039 || regno
== SP_REGNUM
3040 || regno
== FRAME_POINTER_REGNUM
3041 || regno
== ARG_POINTER_REGNUM
);
3044 /* Return true if X is a valid base register for mode MODE.
3045 STRICT_P is true if REG_OK_STRICT is in effect. */
3048 aarch64_base_register_rtx_p (rtx x
, bool strict_p
)
3050 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
3053 return (REG_P (x
) && aarch64_regno_ok_for_base_p (REGNO (x
), strict_p
));
3056 /* Return true if address offset is a valid index. If it is, fill in INFO
3057 appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
3060 aarch64_classify_index (struct aarch64_address_info
*info
, rtx x
,
3061 enum machine_mode mode
, bool strict_p
)
3063 enum aarch64_address_type type
;
3068 if ((REG_P (x
) || GET_CODE (x
) == SUBREG
)
3069 && GET_MODE (x
) == Pmode
)
3071 type
= ADDRESS_REG_REG
;
3075 /* (sign_extend:DI (reg:SI)) */
3076 else if ((GET_CODE (x
) == SIGN_EXTEND
3077 || GET_CODE (x
) == ZERO_EXTEND
)
3078 && GET_MODE (x
) == DImode
3079 && GET_MODE (XEXP (x
, 0)) == SImode
)
3081 type
= (GET_CODE (x
) == SIGN_EXTEND
)
3082 ? ADDRESS_REG_SXTW
: ADDRESS_REG_UXTW
;
3083 index
= XEXP (x
, 0);
3086 /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
3087 else if (GET_CODE (x
) == MULT
3088 && (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
3089 || GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
)
3090 && GET_MODE (XEXP (x
, 0)) == DImode
3091 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
3092 && CONST_INT_P (XEXP (x
, 1)))
3094 type
= (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
3095 ? ADDRESS_REG_SXTW
: ADDRESS_REG_UXTW
;
3096 index
= XEXP (XEXP (x
, 0), 0);
3097 shift
= exact_log2 (INTVAL (XEXP (x
, 1)));
3099 /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
3100 else if (GET_CODE (x
) == ASHIFT
3101 && (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
3102 || GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
)
3103 && GET_MODE (XEXP (x
, 0)) == DImode
3104 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
3105 && CONST_INT_P (XEXP (x
, 1)))
3107 type
= (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
3108 ? ADDRESS_REG_SXTW
: ADDRESS_REG_UXTW
;
3109 index
= XEXP (XEXP (x
, 0), 0);
3110 shift
= INTVAL (XEXP (x
, 1));
3112 /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
3113 else if ((GET_CODE (x
) == SIGN_EXTRACT
3114 || GET_CODE (x
) == ZERO_EXTRACT
)
3115 && GET_MODE (x
) == DImode
3116 && GET_CODE (XEXP (x
, 0)) == MULT
3117 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == DImode
3118 && CONST_INT_P (XEXP (XEXP (x
, 0), 1)))
3120 type
= (GET_CODE (x
) == SIGN_EXTRACT
)
3121 ? ADDRESS_REG_SXTW
: ADDRESS_REG_UXTW
;
3122 index
= XEXP (XEXP (x
, 0), 0);
3123 shift
= exact_log2 (INTVAL (XEXP (XEXP (x
, 0), 1)));
3124 if (INTVAL (XEXP (x
, 1)) != 32 + shift
3125 || INTVAL (XEXP (x
, 2)) != 0)
3128 /* (and:DI (mult:DI (reg:DI) (const_int scale))
3129 (const_int 0xffffffff<<shift)) */
3130 else if (GET_CODE (x
) == AND
3131 && GET_MODE (x
) == DImode
3132 && GET_CODE (XEXP (x
, 0)) == MULT
3133 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == DImode
3134 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
3135 && CONST_INT_P (XEXP (x
, 1)))
3137 type
= ADDRESS_REG_UXTW
;
3138 index
= XEXP (XEXP (x
, 0), 0);
3139 shift
= exact_log2 (INTVAL (XEXP (XEXP (x
, 0), 1)));
3140 if (INTVAL (XEXP (x
, 1)) != (HOST_WIDE_INT
)0xffffffff << shift
)
3143 /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
3144 else if ((GET_CODE (x
) == SIGN_EXTRACT
3145 || GET_CODE (x
) == ZERO_EXTRACT
)
3146 && GET_MODE (x
) == DImode
3147 && GET_CODE (XEXP (x
, 0)) == ASHIFT
3148 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == DImode
3149 && CONST_INT_P (XEXP (XEXP (x
, 0), 1)))
3151 type
= (GET_CODE (x
) == SIGN_EXTRACT
)
3152 ? ADDRESS_REG_SXTW
: ADDRESS_REG_UXTW
;
3153 index
= XEXP (XEXP (x
, 0), 0);
3154 shift
= INTVAL (XEXP (XEXP (x
, 0), 1));
3155 if (INTVAL (XEXP (x
, 1)) != 32 + shift
3156 || INTVAL (XEXP (x
, 2)) != 0)
3159 /* (and:DI (ashift:DI (reg:DI) (const_int shift))
3160 (const_int 0xffffffff<<shift)) */
3161 else if (GET_CODE (x
) == AND
3162 && GET_MODE (x
) == DImode
3163 && GET_CODE (XEXP (x
, 0)) == ASHIFT
3164 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == DImode
3165 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
3166 && CONST_INT_P (XEXP (x
, 1)))
3168 type
= ADDRESS_REG_UXTW
;
3169 index
= XEXP (XEXP (x
, 0), 0);
3170 shift
= INTVAL (XEXP (XEXP (x
, 0), 1));
3171 if (INTVAL (XEXP (x
, 1)) != (HOST_WIDE_INT
)0xffffffff << shift
)
3174 /* (mult:P (reg:P) (const_int scale)) */
3175 else if (GET_CODE (x
) == MULT
3176 && GET_MODE (x
) == Pmode
3177 && GET_MODE (XEXP (x
, 0)) == Pmode
3178 && CONST_INT_P (XEXP (x
, 1)))
3180 type
= ADDRESS_REG_REG
;
3181 index
= XEXP (x
, 0);
3182 shift
= exact_log2 (INTVAL (XEXP (x
, 1)));
3184 /* (ashift:P (reg:P) (const_int shift)) */
3185 else if (GET_CODE (x
) == ASHIFT
3186 && GET_MODE (x
) == Pmode
3187 && GET_MODE (XEXP (x
, 0)) == Pmode
3188 && CONST_INT_P (XEXP (x
, 1)))
3190 type
= ADDRESS_REG_REG
;
3191 index
= XEXP (x
, 0);
3192 shift
= INTVAL (XEXP (x
, 1));
3197 if (GET_CODE (index
) == SUBREG
)
3198 index
= SUBREG_REG (index
);
3201 (shift
> 0 && shift
<= 3
3202 && (1 << shift
) == GET_MODE_SIZE (mode
)))
3204 && aarch64_regno_ok_for_index_p (REGNO (index
), strict_p
))
3207 info
->offset
= index
;
3208 info
->shift
= shift
;
3216 aarch64_offset_7bit_signed_scaled_p (enum machine_mode mode
, HOST_WIDE_INT offset
)
3218 return (offset
>= -64 * GET_MODE_SIZE (mode
)
3219 && offset
< 64 * GET_MODE_SIZE (mode
)
3220 && offset
% GET_MODE_SIZE (mode
) == 0);
3224 offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED
,
3225 HOST_WIDE_INT offset
)
3227 return offset
>= -256 && offset
< 256;
3231 offset_12bit_unsigned_scaled_p (enum machine_mode mode
, HOST_WIDE_INT offset
)
3234 && offset
< 4096 * GET_MODE_SIZE (mode
)
3235 && offset
% GET_MODE_SIZE (mode
) == 0);
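/* Example ranges (added for illustration): for DImode (8-byte) accesses
   the three predicates above accept multiples of 8 in [-512, 504], any
   offset in [-256, 255], and multiples of 8 in [0, 32760] respectively.  */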
3238 /* Return true if X is a valid address for machine mode MODE. If it is,
3239 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
3240 effect. OUTER_CODE is PARALLEL for a load/store pair. */
3243 aarch64_classify_address (struct aarch64_address_info
*info
,
3244 rtx x
, enum machine_mode mode
,
3245 RTX_CODE outer_code
, bool strict_p
)
3247 enum rtx_code code
= GET_CODE (x
);
3249 bool allow_reg_index_p
=
3250 outer_code
!= PARALLEL
&& (GET_MODE_SIZE (mode
) != 16
3251 || aarch64_vector_mode_supported_p (mode
));
3252 /* Don't support anything other than POST_INC or REG addressing for AdvSIMD struct modes. */
3254 if (aarch64_vect_struct_mode_p (mode
)
3255 && (code
!= POST_INC
&& code
!= REG
))
3262 info
->type
= ADDRESS_REG_IMM
;
3264 info
->offset
= const0_rtx
;
3265 return aarch64_base_register_rtx_p (x
, strict_p
);
3273 && (op0
== virtual_stack_vars_rtx
3274 || op0
== frame_pointer_rtx
3275 || op0
== arg_pointer_rtx
)
3276 && CONST_INT_P (op1
))
3278 info
->type
= ADDRESS_REG_IMM
;
3285 if (GET_MODE_SIZE (mode
) != 0
3286 && CONST_INT_P (op1
)
3287 && aarch64_base_register_rtx_p (op0
, strict_p
))
3289 HOST_WIDE_INT offset
= INTVAL (op1
);
3291 info
->type
= ADDRESS_REG_IMM
;
3295 /* TImode and TFmode values are allowed in both pairs of X
3296 registers and individual Q registers. The available
3297 address modes are:
3298 X,X: 7-bit signed scaled offset
3299 Q: 9-bit signed offset
3300 We conservatively require an offset representable in either mode. */
3302 if (mode
== TImode
|| mode
== TFmode
)
3303 return (aarch64_offset_7bit_signed_scaled_p (mode
, offset
)
3304 && offset_9bit_signed_unscaled_p (mode
, offset
));
3306 if (outer_code
== PARALLEL
)
3307 return ((GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
3308 && aarch64_offset_7bit_signed_scaled_p (mode
, offset
));
3310 return (offset_9bit_signed_unscaled_p (mode
, offset
)
3311 || offset_12bit_unsigned_scaled_p (mode
, offset
));
3314 if (allow_reg_index_p
)
3316 /* Look for base + (scaled/extended) index register. */
3317 if (aarch64_base_register_rtx_p (op0
, strict_p
)
3318 && aarch64_classify_index (info
, op1
, mode
, strict_p
))
3323 if (aarch64_base_register_rtx_p (op1
, strict_p
)
3324 && aarch64_classify_index (info
, op0
, mode
, strict_p
))
3337 info
->type
= ADDRESS_REG_WB
;
3338 info
->base
= XEXP (x
, 0);
3339 info
->offset
= NULL_RTX
;
3340 return aarch64_base_register_rtx_p (info
->base
, strict_p
);
3344 info
->type
= ADDRESS_REG_WB
;
3345 info
->base
= XEXP (x
, 0);
3346 if (GET_CODE (XEXP (x
, 1)) == PLUS
3347 && CONST_INT_P (XEXP (XEXP (x
, 1), 1))
3348 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), info
->base
)
3349 && aarch64_base_register_rtx_p (info
->base
, strict_p
))
3351 HOST_WIDE_INT offset
;
3352 info
->offset
= XEXP (XEXP (x
, 1), 1);
3353 offset
= INTVAL (info
->offset
);
3355 /* TImode and TFmode values are allowed in both pairs of X
3356 registers and individual Q registers. The available
3357 address modes are:
3358 X,X: 7-bit signed scaled offset
3359 Q: 9-bit signed offset
3360 We conservatively require an offset representable in either mode. */
3362 if (mode
== TImode
|| mode
== TFmode
)
3363 return (aarch64_offset_7bit_signed_scaled_p (mode
, offset
)
3364 && offset_9bit_signed_unscaled_p (mode
, offset
));
3366 if (outer_code
== PARALLEL
)
3367 return ((GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
3368 && aarch64_offset_7bit_signed_scaled_p (mode
, offset
));
3370 return offset_9bit_signed_unscaled_p (mode
, offset
);
3377 /* load literal: pc-relative constant pool entry. Only supported
3378 for SI mode or larger. */
3379 info
->type
= ADDRESS_SYMBOLIC
;
3380 if (outer_code
!= PARALLEL
&& GET_MODE_SIZE (mode
) >= 4)
3384 split_const (x
, &sym
, &addend
);
3385 return (GET_CODE (sym
) == LABEL_REF
3386 || (GET_CODE (sym
) == SYMBOL_REF
3387 && CONSTANT_POOL_ADDRESS_P (sym
)));
3392 info
->type
= ADDRESS_LO_SUM
;
3393 info
->base
= XEXP (x
, 0);
3394 info
->offset
= XEXP (x
, 1);
3395 if (allow_reg_index_p
3396 && aarch64_base_register_rtx_p (info
->base
, strict_p
))
3399 split_const (info
->offset
, &sym
, &offs
);
3400 if (GET_CODE (sym
) == SYMBOL_REF
3401 && (aarch64_classify_symbol (sym
, SYMBOL_CONTEXT_MEM
)
3402 == SYMBOL_SMALL_ABSOLUTE
))
3404 /* The symbol and offset must be aligned to the access size. */
3406 unsigned int ref_size
;
3408 if (CONSTANT_POOL_ADDRESS_P (sym
))
3409 align
= GET_MODE_ALIGNMENT (get_pool_mode (sym
));
3410 else if (TREE_CONSTANT_POOL_ADDRESS_P (sym
))
3412 tree exp
= SYMBOL_REF_DECL (sym
);
3413 align
= TYPE_ALIGN (TREE_TYPE (exp
));
3414 align
= CONSTANT_ALIGNMENT (exp
, align
);
3416 else if (SYMBOL_REF_DECL (sym
))
3417 align
= DECL_ALIGN (SYMBOL_REF_DECL (sym
));
3418 else if (SYMBOL_REF_HAS_BLOCK_INFO_P (sym
)
3419 && SYMBOL_REF_BLOCK (sym
) != NULL
)
3420 align
= SYMBOL_REF_BLOCK (sym
)->alignment
;
3422 align
= BITS_PER_UNIT
;
3424 ref_size
= GET_MODE_SIZE (mode
);
3426 ref_size
= GET_MODE_SIZE (DImode
);
3428 return ((INTVAL (offs
) & (ref_size
- 1)) == 0
3429 && ((align
/ BITS_PER_UNIT
) & (ref_size
- 1)) == 0);
3440 aarch64_symbolic_address_p (rtx x
)
3444 split_const (x
, &x
, &offset
);
3445 return GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
;
3448 /* Classify the base of symbolic expression X, given that X appears in context CONTEXT. */
3451 enum aarch64_symbol_type
3452 aarch64_classify_symbolic_expression (rtx x
,
3453 enum aarch64_symbol_context context
)
3457 split_const (x
, &x
, &offset
);
3458 return aarch64_classify_symbol (x
, context
);
3462 /* Return TRUE if X is a legitimate address for accessing memory in mode MODE. */
3465 aarch64_legitimate_address_hook_p (enum machine_mode mode
, rtx x
, bool strict_p
)
3467 struct aarch64_address_info addr
;
3469 return aarch64_classify_address (&addr
, x
, mode
, MEM
, strict_p
);
3472 /* Return TRUE if X is a legitimate address for accessing memory in
3473 mode MODE. OUTER_CODE will be PARALLEL if this is a load/store pair operation. */
3476 aarch64_legitimate_address_p (enum machine_mode mode
, rtx x
,
3477 RTX_CODE outer_code
, bool strict_p
)
3479 struct aarch64_address_info addr
;
3481 return aarch64_classify_address (&addr
, x
, mode
, outer_code
, strict_p
);
3484 /* Return TRUE if rtx X is immediate constant 0.0 */
3486 aarch64_float_const_zero_rtx_p (rtx x
)
3490 if (GET_MODE (x
) == VOIDmode
)
3493 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3494 if (REAL_VALUE_MINUS_ZERO (r
))
3495 return !HONOR_SIGNED_ZEROS (GET_MODE (x
));
3496 return REAL_VALUES_EQUAL (r
, dconst0
);
3499 /* Return the fixed registers used for condition codes. */
3502 aarch64_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
3505 *p2
= INVALID_REGNUM
;
3509 /* Emit call insn with PAT and do aarch64-specific handling. */
3512 aarch64_emit_call_insn (rtx pat
)
3514 rtx insn
= emit_call_insn (pat
);
3516 rtx
*fusage
= &CALL_INSN_FUNCTION_USAGE (insn
);
3517 clobber_reg (fusage
, gen_rtx_REG (word_mode
, IP0_REGNUM
));
3518 clobber_reg (fusage
, gen_rtx_REG (word_mode
, IP1_REGNUM
));
3522 aarch64_select_cc_mode (RTX_CODE code
, rtx x
, rtx y
)
3524 /* All floating point compares return CCFP if it is an equality
3525 comparison, and CCFPE otherwise. */
3526 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3553 if ((GET_MODE (x
) == SImode
|| GET_MODE (x
) == DImode
)
3555 && (code
== EQ
|| code
== NE
|| code
== LT
|| code
== GE
)
3556 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
|| GET_CODE (x
) == AND
3557 || GET_CODE (x
) == NEG
))
3560 /* A compare with a shifted operand. Because of canonicalization,
3561 the comparison will have to be swapped when we emit the assembly code. */
3563 if ((GET_MODE (x
) == SImode
|| GET_MODE (x
) == DImode
)
3564 && (REG_P (y
) || GET_CODE (y
) == SUBREG
)
3565 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3566 || GET_CODE (x
) == LSHIFTRT
3567 || GET_CODE (x
) == ZERO_EXTEND
|| GET_CODE (x
) == SIGN_EXTEND
))
3570 /* Similarly for a negated operand, but we can only do this for equalities. */
3572 if ((GET_MODE (x
) == SImode
|| GET_MODE (x
) == DImode
)
3573 && (REG_P (y
) || GET_CODE (y
) == SUBREG
)
3574 && (code
== EQ
|| code
== NE
)
3575 && GET_CODE (x
) == NEG
)
3578 /* A compare of a mode narrower than SI mode against zero can be done
3579 by extending the value in the comparison. */
3580 if ((GET_MODE (x
) == QImode
|| GET_MODE (x
) == HImode
)
3582 /* Only use sign-extension if we really need it. */
3583 return ((code
== GT
|| code
== GE
|| code
== LE
|| code
== LT
)
3584 ? CC_SESWPmode
: CC_ZESWPmode
);
3586 /* For everything else, return CCmode. */
3591 aarch64_get_condition_code (rtx x
)
3593 enum machine_mode mode
= GET_MODE (XEXP (x
, 0));
3594 enum rtx_code comp_code
= GET_CODE (x
);
3596 if (GET_MODE_CLASS (mode
) != MODE_CC
)
3597 mode
= SELECT_CC_MODE (comp_code
, XEXP (x
, 0), XEXP (x
, 1));
3605 case GE
: return AARCH64_GE
;
3606 case GT
: return AARCH64_GT
;
3607 case LE
: return AARCH64_LS
;
3608 case LT
: return AARCH64_MI
;
3609 case NE
: return AARCH64_NE
;
3610 case EQ
: return AARCH64_EQ
;
3611 case ORDERED
: return AARCH64_VC
;
3612 case UNORDERED
: return AARCH64_VS
;
3613 case UNLT
: return AARCH64_LT
;
3614 case UNLE
: return AARCH64_LE
;
3615 case UNGT
: return AARCH64_HI
;
3616 case UNGE
: return AARCH64_PL
;
3617 default: gcc_unreachable ();
3624 case NE
: return AARCH64_NE
;
3625 case EQ
: return AARCH64_EQ
;
3626 case GE
: return AARCH64_GE
;
3627 case GT
: return AARCH64_GT
;
3628 case LE
: return AARCH64_LE
;
3629 case LT
: return AARCH64_LT
;
3630 case GEU
: return AARCH64_CS
;
3631 case GTU
: return AARCH64_HI
;
3632 case LEU
: return AARCH64_LS
;
3633 case LTU
: return AARCH64_CC
;
3634 default: gcc_unreachable ();
3643 case NE
: return AARCH64_NE
;
3644 case EQ
: return AARCH64_EQ
;
3645 case GE
: return AARCH64_LE
;
3646 case GT
: return AARCH64_LT
;
3647 case LE
: return AARCH64_GE
;
3648 case LT
: return AARCH64_GT
;
3649 case GEU
: return AARCH64_LS
;
3650 case GTU
: return AARCH64_CC
;
3651 case LEU
: return AARCH64_CS
;
3652 case LTU
: return AARCH64_HI
;
3653 default: gcc_unreachable ();
3660 case NE
: return AARCH64_NE
;
3661 case EQ
: return AARCH64_EQ
;
3662 case GE
: return AARCH64_PL
;
3663 case LT
: return AARCH64_MI
;
3664 default: gcc_unreachable ();
3671 case NE
: return AARCH64_NE
;
3672 case EQ
: return AARCH64_EQ
;
3673 default: gcc_unreachable ();
3684 bit_count (unsigned HOST_WIDE_INT value
)
3698 aarch64_print_operand (FILE *f
, rtx x
, char code
)
3702 /* An integer or symbol address without a preceding # sign. */
3704 switch (GET_CODE (x
))
3707 fprintf (f
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
));
3711 output_addr_const (f
, x
);
3715 if (GET_CODE (XEXP (x
, 0)) == PLUS
3716 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)
3718 output_addr_const (f
, x
);
3724 output_operand_lossage ("Unsupported operand for code '%c'", code
);
3729 /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
3733 if (!CONST_INT_P (x
)
3734 || (n
= exact_log2 (INTVAL (x
) & ~7)) <= 0)
3736 output_operand_lossage ("invalid operand for '%%%c'", code
);
3752 output_operand_lossage ("invalid operand for '%%%c'", code
);
3762 /* Print N such that 2^N == X. */
3763 if (!CONST_INT_P (x
) || (n
= exact_log2 (INTVAL (x
))) < 0)
3765 output_operand_lossage ("invalid operand for '%%%c'", code
);
3769 asm_fprintf (f
, "%d", n
);
3774 /* Print the number of non-zero bits in X (a const_int). */
3775 if (!CONST_INT_P (x
))
3777 output_operand_lossage ("invalid operand for '%%%c'", code
);
3781 asm_fprintf (f
, "%u", bit_count (INTVAL (x
)));
3785 /* Print the higher numbered register of a pair (TImode) of regs. */
3786 if (!REG_P (x
) || !GP_REGNUM_P (REGNO (x
) + 1))
3788 output_operand_lossage ("invalid operand for '%%%c'", code
);
3792 asm_fprintf (f
, "%s", reg_names
[REGNO (x
) + 1]);
3796 /* Print a condition (eq, ne, etc). */
3798 /* CONST_TRUE_RTX means always -- that's the default. */
3799 if (x
== const_true_rtx
)
3802 if (!COMPARISON_P (x
))
3804 output_operand_lossage ("invalid operand for '%%%c'", code
);
3808 fputs (aarch64_condition_codes
[aarch64_get_condition_code (x
)], f
);
3812 /* Print the inverse of a condition (eq <-> ne, etc). */
3814 /* CONST_TRUE_RTX means never -- that's the default. */
3815 if (x
== const_true_rtx
)
3821 if (!COMPARISON_P (x
))
3823 output_operand_lossage ("invalid operand for '%%%c'", code
);
3827 fputs (aarch64_condition_codes
[AARCH64_INVERSE_CONDITION_CODE
3828 (aarch64_get_condition_code (x
))], f
);
3836 /* Print a scalar FP/SIMD register name. */
3837 if (!REG_P (x
) || !FP_REGNUM_P (REGNO (x
)))
3839 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code
);
3842 asm_fprintf (f
, "%c%d", code
, REGNO (x
) - V0_REGNUM
);
3849 /* Print the first FP/SIMD register name in a list. */
3850 if (!REG_P (x
) || !FP_REGNUM_P (REGNO (x
)))
3852 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code
);
3855 asm_fprintf (f
, "v%d", REGNO (x
) - V0_REGNUM
+ (code
- 'S'));
3859 /* Print bottom 16 bits of integer constant in hex. */
3860 if (!CONST_INT_P (x
))
3862 output_operand_lossage ("invalid operand for '%%%c'", code
);
3865 asm_fprintf (f
, "0x%wx", UINTVAL (x
) & 0xffff);
3870 /* Print a general register name or the zero register (32-bit or 64-bit). */
3873 || (CONST_DOUBLE_P (x
) && aarch64_float_const_zero_rtx_p (x
)))
3875 asm_fprintf (f
, "%czr", code
);
3879 if (REG_P (x
) && GP_REGNUM_P (REGNO (x
)))
3881 asm_fprintf (f
, "%c%d", code
, REGNO (x
) - R0_REGNUM
);
3885 if (REG_P (x
) && REGNO (x
) == SP_REGNUM
)
3887 asm_fprintf (f
, "%ssp", code
== 'w' ? "w" : "");
3894 /* Print a normal operand, if it's a general register, then we
3898 output_operand_lossage ("missing operand");
3902 switch (GET_CODE (x
))
3905 asm_fprintf (f
, "%s", reg_names
[REGNO (x
)]);
3909 aarch64_memory_reference_mode
= GET_MODE (x
);
3910 output_address (XEXP (x
, 0));
3915 output_addr_const (asm_out_file
, x
);
3919 asm_fprintf (f
, "%wd", INTVAL (x
));
3923 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_VECTOR_INT
)
3925 gcc_assert (aarch64_const_vec_all_same_int_p (x
,
3927 HOST_WIDE_INT_MAX
));
3928 asm_fprintf (f
, "%wd", INTVAL (CONST_VECTOR_ELT (x
, 0)));
3930 else if (aarch64_simd_imm_zero_p (x
, GET_MODE (x
)))
3939 /* CONST_DOUBLE can represent a double-width integer.
3940 In this case, the mode of x is VOIDmode. */
3941 if (GET_MODE (x
) == VOIDmode
)
3943 else if (aarch64_float_const_zero_rtx_p (x
))
3948 else if (aarch64_float_const_representable_p (x
))
3951 char float_buf
[buf_size
] = {'\0'};
3953 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3954 real_to_decimal_for_mode (float_buf
, &r
,
3957 asm_fprintf (asm_out_file
, "%s", float_buf
);
3961 output_operand_lossage ("invalid constant");
3964 output_operand_lossage ("invalid operand");
3970 if (GET_CODE (x
) == HIGH
)
3973 switch (aarch64_classify_symbolic_expression (x
, SYMBOL_CONTEXT_ADR
))
3975 case SYMBOL_SMALL_GOT
:
3976 asm_fprintf (asm_out_file
, ":got:");
3979 case SYMBOL_SMALL_TLSGD
:
3980 asm_fprintf (asm_out_file
, ":tlsgd:");
3983 case SYMBOL_SMALL_TLSDESC
:
3984 asm_fprintf (asm_out_file
, ":tlsdesc:");
3987 case SYMBOL_SMALL_GOTTPREL
:
3988 asm_fprintf (asm_out_file
, ":gottprel:");
3991 case SYMBOL_SMALL_TPREL
:
3992 asm_fprintf (asm_out_file
, ":tprel:");
3995 case SYMBOL_TINY_GOT
:
4002 output_addr_const (asm_out_file
, x
);
4006 switch (aarch64_classify_symbolic_expression (x
, SYMBOL_CONTEXT_ADR
))
4008 case SYMBOL_SMALL_GOT
:
4009 asm_fprintf (asm_out_file
, ":lo12:");
4012 case SYMBOL_SMALL_TLSGD
:
4013 asm_fprintf (asm_out_file
, ":tlsgd_lo12:");
4016 case SYMBOL_SMALL_TLSDESC
:
4017 asm_fprintf (asm_out_file
, ":tlsdesc_lo12:");
4020 case SYMBOL_SMALL_GOTTPREL
:
4021 asm_fprintf (asm_out_file
, ":gottprel_lo12:");
4024 case SYMBOL_SMALL_TPREL
:
4025 asm_fprintf (asm_out_file
, ":tprel_lo12_nc:");
4028 case SYMBOL_TINY_GOT
:
4029 asm_fprintf (asm_out_file
, ":got:");
4035 output_addr_const (asm_out_file
, x
);
4040 switch (aarch64_classify_symbolic_expression (x
, SYMBOL_CONTEXT_ADR
))
4042 case SYMBOL_SMALL_TPREL
:
4043 asm_fprintf (asm_out_file
, ":tprel_hi12:");
4048 output_addr_const (asm_out_file
, x
);
4052 output_operand_lossage ("invalid operand prefix '%%%c'", code
);
4058 aarch64_print_operand_address (FILE *f
, rtx x
)
4060 struct aarch64_address_info addr
;
4062 if (aarch64_classify_address (&addr
, x
, aarch64_memory_reference_mode
,
4066 case ADDRESS_REG_IMM
:
4067 if (addr
.offset
== const0_rtx
)
4068 asm_fprintf (f
, "[%s]", reg_names
[REGNO (addr
.base
)]);
4070 asm_fprintf (f
, "[%s, %wd]", reg_names
[REGNO (addr
.base
)],
4071 INTVAL (addr
.offset
));
4074 case ADDRESS_REG_REG
:
4075 if (addr
.shift
== 0)
4076 asm_fprintf (f
, "[%s, %s]", reg_names
[REGNO (addr
.base
)],
4077 reg_names
[REGNO (addr
.offset
)]);
4079 asm_fprintf (f
, "[%s, %s, lsl %u]", reg_names
[REGNO (addr
.base
)],
4080 reg_names
[REGNO (addr
.offset
)], addr
.shift
);
4083 case ADDRESS_REG_UXTW
:
4084 if (addr
.shift
== 0)
4085 asm_fprintf (f
, "[%s, w%d, uxtw]", reg_names
[REGNO (addr
.base
)],
4086 REGNO (addr
.offset
) - R0_REGNUM
);
4088 asm_fprintf (f
, "[%s, w%d, uxtw %u]", reg_names
[REGNO (addr
.base
)],
4089 REGNO (addr
.offset
) - R0_REGNUM
, addr
.shift
);
4092 case ADDRESS_REG_SXTW
:
4093 if (addr
.shift
== 0)
4094 asm_fprintf (f
, "[%s, w%d, sxtw]", reg_names
[REGNO (addr
.base
)],
4095 REGNO (addr
.offset
) - R0_REGNUM
);
4097 asm_fprintf (f
, "[%s, w%d, sxtw %u]", reg_names
[REGNO (addr
.base
)],
4098 REGNO (addr
.offset
) - R0_REGNUM
, addr
.shift
);
4101 case ADDRESS_REG_WB
:
4102 switch (GET_CODE (x
))
4105 asm_fprintf (f
, "[%s, %d]!", reg_names
[REGNO (addr
.base
)],
4106 GET_MODE_SIZE (aarch64_memory_reference_mode
));
4109 asm_fprintf (f
, "[%s], %d", reg_names
[REGNO (addr
.base
)],
4110 GET_MODE_SIZE (aarch64_memory_reference_mode
));
4113 asm_fprintf (f
, "[%s, -%d]!", reg_names
[REGNO (addr
.base
)],
4114 GET_MODE_SIZE (aarch64_memory_reference_mode
));
4117 asm_fprintf (f
, "[%s], -%d", reg_names
[REGNO (addr
.base
)],
4118 GET_MODE_SIZE (aarch64_memory_reference_mode
));
4121 asm_fprintf (f
, "[%s, %wd]!", reg_names
[REGNO (addr
.base
)],
4122 INTVAL (addr
.offset
));
4125 asm_fprintf (f
, "[%s], %wd", reg_names
[REGNO (addr
.base
)],
4126 INTVAL (addr
.offset
));
4133 case ADDRESS_LO_SUM
:
4134 asm_fprintf (f
, "[%s, #:lo12:", reg_names
[REGNO (addr
.base
)]);
4135 output_addr_const (f
, addr
.offset
);
4136 asm_fprintf (f
, "]");
4139 case ADDRESS_SYMBOLIC
:
4143 output_addr_const (f
, x
);
4147 aarch64_label_mentioned_p (rtx x
)
4152 if (GET_CODE (x
) == LABEL_REF
)
4155 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
4156 referencing instruction, but they are constant offsets, not symbols. */
4158 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLS
)
4161 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
4162 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
4168 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
4169 if (aarch64_label_mentioned_p (XVECEXP (x
, i
, j
)))
4172 else if (fmt
[i
] == 'e' && aarch64_label_mentioned_p (XEXP (x
, i
)))
4179 /* Implement REGNO_REG_CLASS. */
4182 aarch64_regno_regclass (unsigned regno
)
4184 if (GP_REGNUM_P (regno
))
4185 return GENERAL_REGS
;
4187 if (regno
== SP_REGNUM
)
4190 if (regno
== FRAME_POINTER_REGNUM
4191 || regno
== ARG_POINTER_REGNUM
)
4192 return POINTER_REGS
;
4194 if (FP_REGNUM_P (regno
))
4195 return FP_LO_REGNUM_P (regno
) ? FP_LO_REGS
: FP_REGS
;
4200 /* Try a machine-dependent way of reloading an illegitimate address
4201 operand. If we find one, push the reload and return the new rtx. */
4204 aarch64_legitimize_reload_address (rtx
*x_p
,
4205 enum machine_mode mode
,
4206 int opnum
, int type
,
4207 int ind_levels ATTRIBUTE_UNUSED
)
4211 /* Do not allow mem (plus (reg, const)) if vector struct mode. */
4212 if (aarch64_vect_struct_mode_p (mode
)
4213 && GET_CODE (x
) == PLUS
4214 && REG_P (XEXP (x
, 0))
4215 && CONST_INT_P (XEXP (x
, 1)))
4219 push_reload (orig_rtx
, NULL_RTX
, x_p
, NULL
,
4220 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
4221 opnum
, (enum reload_type
) type
);
4225 /* We must recognize output that we have already generated ourselves. */
4226 if (GET_CODE (x
) == PLUS
4227 && GET_CODE (XEXP (x
, 0)) == PLUS
4228 && REG_P (XEXP (XEXP (x
, 0), 0))
4229 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
4230 && CONST_INT_P (XEXP (x
, 1)))
4232 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4233 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
4234 opnum
, (enum reload_type
) type
);
4238 /* We wish to handle large displacements off a base register by splitting
4239 the addend across an add and the mem insn. This can cut the number of
4240 extra insns needed from 3 to 1. It is only useful for load/store of a
4241 single register with 12 bit offset field. */
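/* For example (illustration only, not in the original source): a DImode
   load from base + 0x2458 can be split into a single add of the
   uimm12-shifted part 0x2000 into the reload register, followed by a
   load that keeps the remaining offset 0x458 in its 12-bit field,
   instead of materializing the full constant separately.  */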
4242 if (GET_CODE (x
) == PLUS
4243 && REG_P (XEXP (x
, 0))
4244 && CONST_INT_P (XEXP (x
, 1))
4245 && HARD_REGISTER_P (XEXP (x
, 0))
4248 && aarch64_regno_ok_for_base_p (REGNO (XEXP (x
, 0)), true))
4250 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
4251 HOST_WIDE_INT low
= val
& 0xfff;
4252 HOST_WIDE_INT high
= val
- low
;
4255 enum machine_mode xmode
= GET_MODE (x
);
4257 /* In ILP32, xmode can be either DImode or SImode. */
4258 gcc_assert (xmode
== DImode
|| xmode
== SImode
);
4260 /* Reload non-zero BLKmode offsets. This is because we cannot ascertain
4261 BLKmode alignment. */
4262 if (GET_MODE_SIZE (mode
) == 0)
4265 offs
= low
% GET_MODE_SIZE (mode
);
4267 /* Align misaligned offset by adjusting high part to compensate. */
4270 if (aarch64_uimm12_shift (high
+ offs
))
4279 offs
= GET_MODE_SIZE (mode
) - offs
;
4281 high
= high
+ (low
& 0x1000) - offs
;
4286 /* Check for overflow. */
4287 if (high
+ low
!= val
)
4290 cst
= GEN_INT (high
);
4291 if (!aarch64_uimm12_shift (high
))
4292 cst
= force_const_mem (xmode
, cst
);
4294 /* Reload high part into base reg, leaving the low part
4295 in the mem instruction.
4296 Note that replacing this gen_rtx_PLUS with plus_constant is
4297 wrong in this case because we rely on the
4298 (plus (plus reg c1) c2) structure being preserved so that
4299 XEXP (*p, 0) in push_reload below uses the correct term. */
4300 x
= gen_rtx_PLUS (xmode
,
4301 gen_rtx_PLUS (xmode
, XEXP (x
, 0), cst
),
4304 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4305 BASE_REG_CLASS
, xmode
, VOIDmode
, 0, 0,
4306 opnum
, (enum reload_type
) type
);
4315 aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED
, rtx x
,
4317 enum machine_mode mode
,
4318 secondary_reload_info
*sri
)
4320 /* Without the TARGET_SIMD instructions we cannot move a Q register
4321 to a Q register directly. We need a scratch. */
4322 if (REG_P (x
) && (mode
== TFmode
|| mode
== TImode
) && mode
== GET_MODE (x
)
4323 && FP_REGNUM_P (REGNO (x
)) && !TARGET_SIMD
4324 && reg_class_subset_p (rclass
, FP_REGS
))
4327 sri
->icode
= CODE_FOR_aarch64_reload_movtf
;
4328 else if (mode
== TImode
)
4329 sri
->icode
= CODE_FOR_aarch64_reload_movti
;
4333 /* A TFmode or TImode memory access should be handled via the FP_REGS class
4334 because AArch64 has richer addressing modes for LDR/STR instructions
4335 than LDP/STP instructions. */
4336 if (!TARGET_GENERAL_REGS_ONLY
&& rclass
== GENERAL_REGS
4337 && GET_MODE_SIZE (mode
) == 16 && MEM_P (x
))
4340 if (rclass
== FP_REGS
&& (mode
== TImode
|| mode
== TFmode
) && CONSTANT_P(x
))
4341 return GENERAL_REGS
;
4347 aarch64_can_eliminate (const int from
, const int to
)
4349 /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
4350 HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
4352 if (frame_pointer_needed
)
4354 if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
4356 if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
4358 if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
4359 && !cfun
->calls_alloca
)
4361 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
4371 aarch64_initial_elimination_offset (unsigned from
, unsigned to
)
4373 aarch64_layout_frame ();
4375 if (to
== HARD_FRAME_POINTER_REGNUM
)
4377 if (from
== ARG_POINTER_REGNUM
)
4378 return cfun
->machine
->frame
.frame_size
- crtl
->outgoing_args_size
;
4380 if (from
== FRAME_POINTER_REGNUM
)
4381 return (cfun
->machine
->frame
.hard_fp_offset
4382 - cfun
->machine
->frame
.saved_varargs_size
);
4385 if (to
== STACK_POINTER_REGNUM
)
4387 if (from
== FRAME_POINTER_REGNUM
)
4388 return (cfun
->machine
->frame
.frame_size
4389 - cfun
->machine
->frame
.saved_varargs_size
);
4392 return cfun
->machine
->frame
.frame_size
;
4395 /* Implement RETURN_ADDR_RTX. We do not support moving back to a previous frame. */
4399 aarch64_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
4403 return get_hard_reg_initial_val (Pmode
, LR_REGNUM
);
4408 aarch64_asm_trampoline_template (FILE *f
)
4412 asm_fprintf (f
, "\tldr\tw%d, .+16\n", IP1_REGNUM
- R0_REGNUM
);
4413 asm_fprintf (f
, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM
- R0_REGNUM
);
4417 asm_fprintf (f
, "\tldr\t%s, .+16\n", reg_names
[IP1_REGNUM
]);
4418 asm_fprintf (f
, "\tldr\t%s, .+20\n", reg_names
[STATIC_CHAIN_REGNUM
]);
4420 asm_fprintf (f
, "\tbr\t%s\n", reg_names
[IP1_REGNUM
]);
4421 assemble_aligned_integer (4, const0_rtx
);
4422 assemble_aligned_integer (POINTER_BYTES
, const0_rtx
);
4423 assemble_aligned_integer (POINTER_BYTES
, const0_rtx
);
4427 aarch64_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
4429 rtx fnaddr
, mem
, a_tramp
;
4430 const int tramp_code_sz
= 16;
4432 /* Don't need to copy the trailing D-words; we fill those in below. */
4433 emit_block_move (m_tramp
, assemble_trampoline_template (),
4434 GEN_INT (tramp_code_sz
), BLOCK_OP_NORMAL
);
4435 mem
= adjust_address (m_tramp
, ptr_mode
, tramp_code_sz
);
4436 fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
4437 if (GET_MODE (fnaddr
) != ptr_mode
)
4438 fnaddr
= convert_memory_address (ptr_mode
, fnaddr
);
4439 emit_move_insn (mem
, fnaddr
);
4441 mem
= adjust_address (m_tramp
, ptr_mode
, tramp_code_sz
+ POINTER_BYTES
);
4442 emit_move_insn (mem
, chain_value
);
4444 /* XXX We should really define a "clear_cache" pattern and use
4445 gen_clear_cache(). */
4446 a_tramp
= XEXP (m_tramp
, 0);
4447 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__clear_cache"),
4448 LCT_NORMAL
, VOIDmode
, 2, a_tramp
, ptr_mode
,
4449 plus_constant (ptr_mode
, a_tramp
, TRAMPOLINE_SIZE
),
4453 static unsigned char
4454 aarch64_class_max_nregs (reg_class_t regclass
, enum machine_mode mode
)
4458 case CALLER_SAVE_REGS
:
4465 aarch64_vector_mode_p (mode
) ? (GET_MODE_SIZE (mode
) + 15) / 16 :
4466 (GET_MODE_SIZE (mode
) + 7) / 8;
4480 aarch64_preferred_reload_class (rtx x
, reg_class_t regclass
)
4482 if (regclass
== POINTER_REGS
)
4483 return GENERAL_REGS
;
4485 if (regclass
== STACK_REG
)
4488 && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x
)), POINTER_REGS
))
4494 /* If it's an integer immediate that MOVI can't handle, then
4495 FP_REGS is not an option, so we return NO_REGS instead. */
4496 if (CONST_INT_P (x
) && reg_class_subset_p (regclass
, FP_REGS
)
4497 && !aarch64_simd_imm_scalar_p (x
, GET_MODE (x
)))
4500 /* Register elimination can result in a request for
4501 SP+constant->FP_REGS. We cannot support such operations which
4502 use SP as source and an FP_REG as destination, so reject out of hand.
4504 if (! reg_class_subset_p (regclass
, GENERAL_REGS
) && GET_CODE (x
) == PLUS
)
4506 rtx lhs
= XEXP (x
, 0);
4508 /* Look through a possible SUBREG introduced by ILP32. */
4509 if (GET_CODE (lhs
) == SUBREG
)
4510 lhs
= SUBREG_REG (lhs
);
4512 gcc_assert (REG_P (lhs
));
4513 gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs
)),
4522 aarch64_asm_output_labelref (FILE* f
, const char *name
)
4524 asm_fprintf (f
, "%U%s", name
);
4528 aarch64_elf_asm_constructor (rtx symbol
, int priority
)
4530 if (priority
== DEFAULT_INIT_PRIORITY
)
4531 default_ctor_section_asm_out_constructor (symbol
, priority
);
4536 snprintf (buf
, sizeof (buf
), ".init_array.%.5u", priority
);
4537 s
= get_section (buf
, SECTION_WRITE
, NULL
);
4538 switch_to_section (s
);
4539 assemble_align (POINTER_SIZE
);
4540 assemble_aligned_integer (POINTER_BYTES
, symbol
);
4545 aarch64_elf_asm_destructor (rtx symbol
, int priority
)
4547 if (priority
== DEFAULT_INIT_PRIORITY
)
4548 default_dtor_section_asm_out_destructor (symbol
, priority
);
4553 snprintf (buf
, sizeof (buf
), ".fini_array.%.5u", priority
);
4554 s
= get_section (buf
, SECTION_WRITE
, NULL
);
4555 switch_to_section (s
);
4556 assemble_align (POINTER_SIZE
);
4557 assemble_aligned_integer (POINTER_BYTES
, symbol
);
4562 aarch64_output_casesi (rtx
*operands
)
4566 rtx diff_vec
= PATTERN (NEXT_INSN (as_a
<rtx_insn
*> (operands
[2])));
4568 static const char *const patterns
[4][2] =
4571 "ldrb\t%w3, [%0,%w1,uxtw]",
4572 "add\t%3, %4, %w3, sxtb #2"
4575 "ldrh\t%w3, [%0,%w1,uxtw #1]",
4576 "add\t%3, %4, %w3, sxth #2"
4579 "ldr\t%w3, [%0,%w1,uxtw #2]",
4580 "add\t%3, %4, %w3, sxtw #2"
4582 /* We assume that DImode is only generated when not optimizing and
4583 that we don't really need 64-bit address offsets. That would
4584 imply an object file with 8GB of code in a single function! */
4586 "ldr\t%w3, [%0,%w1,uxtw #2]",
4587 "add\t%3, %4, %w3, sxtw #2"
4591 gcc_assert (GET_CODE (diff_vec
) == ADDR_DIFF_VEC
);
4593 index
= exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec
)));
4595 gcc_assert (index
>= 0 && index
<= 3);
4597 /* Need to implement table size reduction by changing the code below. */
4598 output_asm_insn (patterns
[index
][0], operands
);
4599 ASM_GENERATE_INTERNAL_LABEL (label
, "Lrtx", CODE_LABEL_NUMBER (operands
[2]));
4600 snprintf (buf
, sizeof (buf
),
4601 "adr\t%%4, %s", targetm
.strip_name_encoding (label
));
4602 output_asm_insn (buf
, operands
);
4603 output_asm_insn (patterns
[index
][1], operands
);
4604 output_asm_insn ("br\t%3", operands
);
4605 assemble_label (asm_out_file
, label
);
4610 /* Return size in bits of an arithmetic operand which is shifted/scaled and
4611 masked such that it is suitable for a UXTB, UXTH, or UXTW extend instruction. */
4615 aarch64_uxt_size (int shift
, HOST_WIDE_INT mask
)
4617 if (shift
>= 0 && shift
<= 3)
4620 for (size
= 8; size
<= 32; size
*= 2)
4622 HOST_WIDE_INT bits
= ((HOST_WIDE_INT
)1U << size
) - 1;
4623 if (mask
== bits
<< shift
)
4631 aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED
,
4632 const_rtx x ATTRIBUTE_UNUSED
)
4634 /* We can't use blocks for constants when we're using a per-function constant pool. */
4640 aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED
,
4641 rtx x ATTRIBUTE_UNUSED
,
4642 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
4644 /* Force all constant pool entries into the current function section. */
4645 return function_section (current_function_decl
);
4651 /* Helper function for rtx cost calculation. Strip a shift expression
4652 from X. Returns the inner operand if successful, or the original
4653 expression on failure. */
4655 aarch64_strip_shift (rtx x
)
4659 /* We accept both ROTATERT and ROTATE: since the RHS must be a constant
4660 we can convert both to ROR during final output. */
4661 if ((GET_CODE (op
) == ASHIFT
4662 || GET_CODE (op
) == ASHIFTRT
4663 || GET_CODE (op
) == LSHIFTRT
4664 || GET_CODE (op
) == ROTATERT
4665 || GET_CODE (op
) == ROTATE
)
4666 && CONST_INT_P (XEXP (op
, 1)))
4667 return XEXP (op
, 0);
4669 if (GET_CODE (op
) == MULT
4670 && CONST_INT_P (XEXP (op
, 1))
4671 && ((unsigned) exact_log2 (INTVAL (XEXP (op
, 1)))) < 64)
4672 return XEXP (op
, 0);
/* Helper function for rtx cost calculation.  Strip an extend
   expression from X.  Returns the inner operand if successful, or the
   original expression on failure.  We deal with a number of possible
   canonicalization variations here.  */
static rtx
aarch64_strip_extend (rtx x)
{
  rtx op = x;

  /* Zero and sign extraction of a widened value.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && XEXP (op, 2) == const0_rtx
      && GET_CODE (XEXP (op, 0)) == MULT
      && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
					 XEXP (op, 1)))
    return XEXP (XEXP (op, 0), 0);
  /* It can also be represented (for zero-extend) as an AND with an
     immediate.  */
  if (GET_CODE (op) == AND
      && GET_CODE (XEXP (op, 0)) == MULT
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1))
      && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
			   INTVAL (XEXP (op, 1))) != 0)
    return XEXP (XEXP (op, 0), 0);
  /* Now handle extended register, as this may also have an optional
     left shift by 1..4.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
    op = XEXP (op, 0);

  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    op = XEXP (op, 0);

  if (op != x)
    return op;

  return x;
}
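/* Illustrative example, not part of the original source: a zero-extended,
   scaled index is often represented as
   (and (mult (reg:DI X) (const_int 4)) (const_int 1020)); this is the AND
   form handled above, since exact_log2 (4) == 2 and 1020 == 0xff << 2, so
   aarch64_uxt_size (2, 1020) is non-zero and the stripped result is
   (reg:DI X).  */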
4721 /* Helper function for rtx cost calculation. Calculate the cost of
4722 a MULT, which may be part of a multiply-accumulate rtx. Return
4723 the calculated cost of the expression, recursing manually in to
4724 operands where needed. */
4727 aarch64_rtx_mult_cost (rtx x
, int code
, int outer
, bool speed
)
4730 const struct cpu_cost_table
*extra_cost
4731 = aarch64_tune_params
->insn_extra_cost
;
4733 bool maybe_fma
= (outer
== PLUS
|| outer
== MINUS
);
4734 enum machine_mode mode
= GET_MODE (x
);
4736 gcc_checking_assert (code
== MULT
);
4741 if (VECTOR_MODE_P (mode
))
4742 mode
= GET_MODE_INNER (mode
);
4744 /* Integer multiply/fma. */
4745 if (GET_MODE_CLASS (mode
) == MODE_INT
)
4747 /* The multiply will be canonicalized as a shift, cost it as such. */
4748 if (CONST_INT_P (op1
)
4749 && exact_log2 (INTVAL (op1
)) > 0)
4754 /* ADD (shifted register). */
4755 cost
+= extra_cost
->alu
.arith_shift
;
4757 /* LSL (immediate). */
4758 cost
+= extra_cost
->alu
.shift
;
4761 cost
+= rtx_cost (op0
, GET_CODE (op0
), 0, speed
);
4766 /* Integer multiplies or FMAs have zero/sign extending variants. */
4767 if ((GET_CODE (op0
) == ZERO_EXTEND
4768 && GET_CODE (op1
) == ZERO_EXTEND
)
4769 || (GET_CODE (op0
) == SIGN_EXTEND
4770 && GET_CODE (op1
) == SIGN_EXTEND
))
4772 cost
+= rtx_cost (XEXP (op0
, 0), MULT
, 0, speed
)
4773 + rtx_cost (XEXP (op1
, 0), MULT
, 1, speed
);
4778 /* MADD/SMADDL/UMADDL. */
4779 cost
+= extra_cost
->mult
[0].extend_add
;
4781 /* MUL/SMULL/UMULL. */
4782 cost
+= extra_cost
->mult
[0].extend
;
4788 /* This is either an integer multiply or an FMA. In both cases
4789 we want to recurse and cost the operands. */
4790 cost
+= rtx_cost (op0
, MULT
, 0, speed
)
4791 + rtx_cost (op1
, MULT
, 1, speed
);
4797 cost
+= extra_cost
->mult
[mode
== DImode
].add
;
4800 cost
+= extra_cost
->mult
[mode
== DImode
].simple
;
      /* Floating-point FMA/FMUL can also support negations of the
	 operands.  */
4811 if (GET_CODE (op0
) == NEG
)
4812 op0
= XEXP (op0
, 0);
4813 if (GET_CODE (op1
) == NEG
)
4814 op1
= XEXP (op1
, 0);
4817 /* FMADD/FNMADD/FNMSUB/FMSUB. */
4818 cost
+= extra_cost
->fp
[mode
== DFmode
].fma
;
4821 cost
+= extra_cost
->fp
[mode
== DFmode
].mult
;
4824 cost
+= rtx_cost (op0
, MULT
, 0, speed
)
4825 + rtx_cost (op1
, MULT
, 1, speed
);
4831 aarch64_address_cost (rtx x
,
4832 enum machine_mode mode
,
4833 addr_space_t as ATTRIBUTE_UNUSED
,
4836 enum rtx_code c
= GET_CODE (x
);
4837 const struct cpu_addrcost_table
*addr_cost
= aarch64_tune_params
->addr_cost
;
4838 struct aarch64_address_info info
;
4842 if (!aarch64_classify_address (&info
, x
, mode
, c
, false))
4844 if (GET_CODE (x
) == CONST
|| GET_CODE (x
) == SYMBOL_REF
)
4846 /* This is a CONST or SYMBOL ref which will be split
4847 in a different way depending on the code model in use.
4848 Cost it through the generic infrastructure. */
4849 int cost_symbol_ref
= rtx_cost (x
, MEM
, 1, speed
);
4850 /* Divide through by the cost of one instruction to
4851 bring it to the same units as the address costs. */
4852 cost_symbol_ref
/= COSTS_N_INSNS (1);
4853 /* The cost is then the cost of preparing the address,
4854 followed by an immediate (possibly 0) offset. */
4855 return cost_symbol_ref
+ addr_cost
->imm_offset
;
      /* This is most likely a jump table from a case
	 statement.  */
4861 return addr_cost
->register_offset
;
4867 case ADDRESS_LO_SUM
:
4868 case ADDRESS_SYMBOLIC
:
4869 case ADDRESS_REG_IMM
:
4870 cost
+= addr_cost
->imm_offset
;
4873 case ADDRESS_REG_WB
:
4874 if (c
== PRE_INC
|| c
== PRE_DEC
|| c
== PRE_MODIFY
)
4875 cost
+= addr_cost
->pre_modify
;
4876 else if (c
== POST_INC
|| c
== POST_DEC
|| c
== POST_MODIFY
)
4877 cost
+= addr_cost
->post_modify
;
4883 case ADDRESS_REG_REG
:
4884 cost
+= addr_cost
->register_offset
;
4887 case ADDRESS_REG_UXTW
:
4888 case ADDRESS_REG_SXTW
:
4889 cost
+= addr_cost
->register_extend
;
4899 /* For the sake of calculating the cost of the shifted register
4900 component, we can treat same sized modes in the same way. */
4901 switch (GET_MODE_BITSIZE (mode
))
4904 cost
+= addr_cost
->addr_scale_costs
.hi
;
4908 cost
+= addr_cost
->addr_scale_costs
.si
;
4912 cost
+= addr_cost
->addr_scale_costs
.di
;
4915 /* We can't tell, or this is a 128-bit vector. */
4917 cost
+= addr_cost
->addr_scale_costs
.ti
;
4925 /* Return true if the RTX X in mode MODE is a zero or sign extract
4926 usable in an ADD or SUB (extended register) instruction. */
4928 aarch64_rtx_arith_op_extract_p (rtx x
, enum machine_mode mode
)
4930 /* Catch add with a sign extract.
4931 This is add_<optab><mode>_multp2. */
4932 if (GET_CODE (x
) == SIGN_EXTRACT
4933 || GET_CODE (x
) == ZERO_EXTRACT
)
4935 rtx op0
= XEXP (x
, 0);
4936 rtx op1
= XEXP (x
, 1);
4937 rtx op2
= XEXP (x
, 2);
4939 if (GET_CODE (op0
) == MULT
4940 && CONST_INT_P (op1
)
4941 && op2
== const0_rtx
4942 && CONST_INT_P (XEXP (op0
, 1))
4943 && aarch64_is_extend_from_extract (mode
,
4955 aarch64_frint_unspec_p (unsigned int u
)
4973 /* Calculate the cost of calculating (if_then_else (OP0) (OP1) (OP2)),
4974 storing it in *COST. Result is true if the total cost of the operation
4975 has now been calculated. */
4977 aarch64_if_then_else_costs (rtx op0
, rtx op1
, rtx op2
, int *cost
, bool speed
)
4981 enum rtx_code cmpcode
;
4983 if (COMPARISON_P (op0
))
4985 inner
= XEXP (op0
, 0);
4986 comparator
= XEXP (op0
, 1);
4987 cmpcode
= GET_CODE (op0
);
4992 comparator
= const0_rtx
;
4996 if (GET_CODE (op1
) == PC
|| GET_CODE (op2
) == PC
)
4998 /* Conditional branch. */
4999 if (GET_MODE_CLASS (GET_MODE (inner
)) == MODE_CC
)
5003 if (cmpcode
== NE
|| cmpcode
== EQ
)
5005 if (comparator
== const0_rtx
)
5007 /* TBZ/TBNZ/CBZ/CBNZ. */
5008 if (GET_CODE (inner
) == ZERO_EXTRACT
)
5010 *cost
+= rtx_cost (XEXP (inner
, 0), ZERO_EXTRACT
,
5014 *cost
+= rtx_cost (inner
, cmpcode
, 0, speed
);
5019 else if (cmpcode
== LT
|| cmpcode
== GE
)
5022 if (comparator
== const0_rtx
)
5027 else if (GET_MODE_CLASS (GET_MODE (inner
)) == MODE_CC
)
5029 /* It's a conditional operation based on the status flags,
5030 so it must be some flavor of CSEL. */
5032 /* CSNEG, CSINV, and CSINC are handled for free as part of CSEL. */
5033 if (GET_CODE (op1
) == NEG
5034 || GET_CODE (op1
) == NOT
5035 || (GET_CODE (op1
) == PLUS
&& XEXP (op1
, 1) == const1_rtx
))
5036 op1
= XEXP (op1
, 0);
5038 *cost
+= rtx_cost (op1
, IF_THEN_ELSE
, 1, speed
);
5039 *cost
+= rtx_cost (op2
, IF_THEN_ELSE
, 2, speed
);
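	  /* Illustrative example, not part of the original source: for
	     (if_then_else (cond) (neg (reg A)) (reg B)) the NEG is stripped
	     above, so the whole expression is costed as a single
	     conditional-select-with-negate (CSNEG) rather than a separate
	     negate followed by a CSEL.  */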
5043 /* We don't know what this is, cost all operands. */
5047 /* Calculate the cost of calculating X, storing it in *COST. Result
5048 is true if the total cost of the operation has now been calculated. */
5050 aarch64_rtx_costs (rtx x
, int code
, int outer ATTRIBUTE_UNUSED
,
5051 int param ATTRIBUTE_UNUSED
, int *cost
, bool speed
)
5054 const struct cpu_cost_table
*extra_cost
5055 = aarch64_tune_params
->insn_extra_cost
;
5056 enum machine_mode mode
= GET_MODE (x
);
5058 /* By default, assume that everything has equivalent cost to the
5059 cheapest instruction. Any additional costs are applied as a delta
5060 above this default. */
5061 *cost
= COSTS_N_INSNS (1);
5063 /* TODO: The cost infrastructure currently does not handle
5064 vector operations. Assume that all vector operations
5065 are equally expensive. */
5066 if (VECTOR_MODE_P (mode
))
5069 *cost
+= extra_cost
->vect
.alu
;
5076 /* The cost depends entirely on the operands to SET. */
5081 switch (GET_CODE (op0
))
5086 rtx address
= XEXP (op0
, 0);
5087 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5088 *cost
+= extra_cost
->ldst
.store
;
5089 else if (mode
== SFmode
)
5090 *cost
+= extra_cost
->ldst
.storef
;
5091 else if (mode
== DFmode
)
5092 *cost
+= extra_cost
->ldst
.stored
;
5095 COSTS_N_INSNS (aarch64_address_cost (address
, mode
,
5099 *cost
+= rtx_cost (op1
, SET
, 1, speed
);
5103 if (! REG_P (SUBREG_REG (op0
)))
5104 *cost
+= rtx_cost (SUBREG_REG (op0
), SET
, 0, speed
);
5108 /* const0_rtx is in general free, but we will use an
5109 instruction to set a register to 0. */
5110 if (REG_P (op1
) || op1
== const0_rtx
)
5112 /* The cost is 1 per register copied. */
5113 int n_minus_1
= (GET_MODE_SIZE (GET_MODE (op0
)) - 1)
5115 *cost
= COSTS_N_INSNS (n_minus_1
+ 1);
5118 /* Cost is just the cost of the RHS of the set. */
5119 *cost
+= rtx_cost (op1
, SET
, 1, speed
);
5124 /* Bit-field insertion. Strip any redundant widening of
5125 the RHS to meet the width of the target. */
5126 if (GET_CODE (op1
) == SUBREG
)
5127 op1
= SUBREG_REG (op1
);
5128 if ((GET_CODE (op1
) == ZERO_EXTEND
5129 || GET_CODE (op1
) == SIGN_EXTEND
)
5130 && CONST_INT_P (XEXP (op0
, 1))
5131 && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1
, 0)))
5132 >= INTVAL (XEXP (op0
, 1))))
5133 op1
= XEXP (op1
, 0);
5135 if (CONST_INT_P (op1
))
5137 /* MOV immediate is assumed to always be cheap. */
5138 *cost
= COSTS_N_INSNS (1);
5144 *cost
+= extra_cost
->alu
.bfi
;
5145 *cost
+= rtx_cost (op1
, (enum rtx_code
) code
, 1, speed
);
5151 /* We can't make sense of this, assume default cost. */
5152 *cost
= COSTS_N_INSNS (1);
      /* If an instruction can incorporate a constant within the
	 instruction, the instruction's expression avoids calling
	 rtx_cost () on the constant.  If rtx_cost () is called on a
	 constant, then it is usually because the constant must be
	 moved into a register by one or more instructions.

	 The exception is constant 0, which can be expressed
	 as XZR/WZR and is therefore free.  That in turn has its own
	 exception: for (set (reg) (const0_rtx)) we must cost the
	 move.  However, we can catch that when we cost the SET, so
	 we don't need to consider it here.  */
5169 if (x
== const0_rtx
)
5173 /* To an approximation, building any other constant is
5174 proportionally expensive to the number of instructions
5175 required to build that constant. This is true whether we
5176 are compiling for SPEED or otherwise. */
5177 *cost
= COSTS_N_INSNS (aarch64_build_constant (0,
5186 /* mov[df,sf]_aarch64. */
5187 if (aarch64_float_const_representable_p (x
))
5188 /* FMOV (scalar immediate). */
5189 *cost
+= extra_cost
->fp
[mode
== DFmode
].fpconst
;
5190 else if (!aarch64_float_const_zero_rtx_p (x
))
5192 /* This will be a load from memory. */
5194 *cost
+= extra_cost
->ldst
.loadd
;
5196 *cost
+= extra_cost
->ldst
.loadf
;
5199 /* Otherwise this is +0.0. We get this using MOVI d0, #0
5200 or MOV v0.s[0], wzr - neither of which are modeled by the
5201 cost tables. Just use the default cost. */
	/* For loads we want the base cost of a load, plus an
	   approximation for the additional cost of the addressing
	   mode.  */
5214 rtx address
= XEXP (x
, 0);
5215 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5216 *cost
+= extra_cost
->ldst
.load
;
5217 else if (mode
== SFmode
)
5218 *cost
+= extra_cost
->ldst
.loadf
;
5219 else if (mode
== DFmode
)
5220 *cost
+= extra_cost
->ldst
.loadd
;
5223 COSTS_N_INSNS (aarch64_address_cost (address
, mode
,
5232 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
)
5234 if (GET_RTX_CLASS (GET_CODE (op0
)) == RTX_COMPARE
5235 || GET_RTX_CLASS (GET_CODE (op0
)) == RTX_COMM_COMPARE
)
5238 *cost
+= rtx_cost (XEXP (op0
, 0), NEG
, 0, speed
);
5242 /* Cost this as SUB wzr, X. */
5243 op0
= CONST0_RTX (GET_MODE (x
));
5248 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
5250 /* Support (neg(fma...)) as a single instruction only if
5251 sign of zeros is unimportant. This matches the decision
5252 making in aarch64.md. */
5253 if (GET_CODE (op0
) == FMA
&& !HONOR_SIGNED_ZEROS (GET_MODE (op0
)))
5256 *cost
= rtx_cost (op0
, NEG
, 0, speed
);
5261 *cost
+= extra_cost
->fp
[mode
== DFmode
].neg
;
5270 *cost
+= extra_cost
->alu
.clz
;
5278 if (op1
== const0_rtx
5279 && GET_CODE (op0
) == AND
)
5285 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
)
5287 /* TODO: A write to the CC flags possibly costs extra, this
5288 needs encoding in the cost tables. */
5290 /* CC_ZESWPmode supports zero extend for free. */
5291 if (GET_MODE (x
) == CC_ZESWPmode
&& GET_CODE (op0
) == ZERO_EXTEND
)
5292 op0
= XEXP (op0
, 0);
5295 if (GET_CODE (op0
) == AND
)
5301 if (GET_CODE (op0
) == PLUS
)
5303 /* ADDS (and CMN alias). */
5308 if (GET_CODE (op0
) == MINUS
)
5315 if (GET_CODE (op1
) == NEG
)
5319 *cost
+= extra_cost
->alu
.arith
;
5321 *cost
+= rtx_cost (op0
, COMPARE
, 0, speed
);
5322 *cost
+= rtx_cost (XEXP (op1
, 0), NEG
, 1, speed
);
5328 Compare can freely swap the order of operands, and
5329 canonicalization puts the more complex operation first.
5330 But the integer MINUS logic expects the shift/extend
5331 operation in op1. */
5333 || (GET_CODE (op0
) == SUBREG
&& REG_P (SUBREG_REG (op0
)))))
5341 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
5345 *cost
+= extra_cost
->fp
[mode
== DFmode
].compare
;
5347 if (CONST_DOUBLE_P (op1
) && aarch64_float_const_zero_rtx_p (op1
))
5349 /* FCMP supports constant 0.0 for no extra cost. */
5363 /* Detect valid immediates. */
5364 if ((GET_MODE_CLASS (mode
) == MODE_INT
5365 || (GET_MODE_CLASS (mode
) == MODE_CC
5366 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
))
5367 && CONST_INT_P (op1
)
5368 && aarch64_uimm12_shift (INTVAL (op1
)))
5370 *cost
+= rtx_cost (op0
, MINUS
, 0, speed
);
5373 /* SUB(S) (immediate). */
5374 *cost
+= extra_cost
->alu
.arith
;
5379 /* Look for SUB (extended register). */
5380 if (aarch64_rtx_arith_op_extract_p (op1
, mode
))
5383 *cost
+= extra_cost
->alu
.arith_shift
;
5385 *cost
+= rtx_cost (XEXP (XEXP (op1
, 0), 0),
5386 (enum rtx_code
) GET_CODE (op1
),
5391 rtx new_op1
= aarch64_strip_extend (op1
);
5393 /* Cost this as an FMA-alike operation. */
5394 if ((GET_CODE (new_op1
) == MULT
5395 || GET_CODE (new_op1
) == ASHIFT
)
5398 *cost
+= aarch64_rtx_mult_cost (new_op1
, MULT
,
5399 (enum rtx_code
) code
,
5401 *cost
+= rtx_cost (op0
, MINUS
, 0, speed
);
5405 *cost
+= rtx_cost (new_op1
, MINUS
, 1, speed
);
5409 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5411 *cost
+= extra_cost
->alu
.arith
;
5412 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5414 *cost
+= extra_cost
->fp
[mode
== DFmode
].addsub
;
5427 if (GET_RTX_CLASS (GET_CODE (op0
)) == RTX_COMPARE
5428 || GET_RTX_CLASS (GET_CODE (op0
)) == RTX_COMM_COMPARE
)
5431 *cost
+= rtx_cost (XEXP (op0
, 0), PLUS
, 0, speed
);
5432 *cost
+= rtx_cost (op1
, PLUS
, 1, speed
);
5436 if (GET_MODE_CLASS (mode
) == MODE_INT
5437 && CONST_INT_P (op1
)
5438 && aarch64_uimm12_shift (INTVAL (op1
)))
5440 *cost
+= rtx_cost (op0
, PLUS
, 0, speed
);
5443 /* ADD (immediate). */
5444 *cost
+= extra_cost
->alu
.arith
;
5448 /* Look for ADD (extended register). */
5449 if (aarch64_rtx_arith_op_extract_p (op0
, mode
))
5452 *cost
+= extra_cost
->alu
.arith_shift
;
5454 *cost
+= rtx_cost (XEXP (XEXP (op0
, 0), 0),
5455 (enum rtx_code
) GET_CODE (op0
),
5460 /* Strip any extend, leave shifts behind as we will
5461 cost them through mult_cost. */
5462 new_op0
= aarch64_strip_extend (op0
);
5464 if (GET_CODE (new_op0
) == MULT
5465 || GET_CODE (new_op0
) == ASHIFT
)
5467 *cost
+= aarch64_rtx_mult_cost (new_op0
, MULT
, PLUS
,
5469 *cost
+= rtx_cost (op1
, PLUS
, 1, speed
);
5473 *cost
+= (rtx_cost (new_op0
, PLUS
, 0, speed
)
5474 + rtx_cost (op1
, PLUS
, 1, speed
));
5478 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5480 *cost
+= extra_cost
->alu
.arith
;
5481 else if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5483 *cost
+= extra_cost
->fp
[mode
== DFmode
].addsub
;
5489 *cost
= COSTS_N_INSNS (1);
5492 *cost
+= extra_cost
->alu
.rev
;
5497 if (aarch_rev16_p (x
))
5499 *cost
= COSTS_N_INSNS (1);
5502 *cost
+= extra_cost
->alu
.rev
;
5514 && GET_CODE (op0
) == MULT
5515 && CONST_INT_P (XEXP (op0
, 1))
5516 && CONST_INT_P (op1
)
5517 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (op0
, 1))),
5520 /* This is a UBFM/SBFM. */
5521 *cost
+= rtx_cost (XEXP (op0
, 0), ZERO_EXTRACT
, 0, speed
);
5523 *cost
+= extra_cost
->alu
.bfx
;
5527 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
)
	  /* We possibly get the immediate for free, this is not
	     modelled.  */
5531 if (CONST_INT_P (op1
)
5532 && aarch64_bitmask_imm (INTVAL (op1
), GET_MODE (x
)))
5534 *cost
+= rtx_cost (op0
, (enum rtx_code
) code
, 0, speed
);
5537 *cost
+= extra_cost
->alu
.logical
;
5545 /* Handle ORN, EON, or BIC. */
5546 if (GET_CODE (op0
) == NOT
)
5547 op0
= XEXP (op0
, 0);
5549 new_op0
= aarch64_strip_shift (op0
);
5551 /* If we had a shift on op0 then this is a logical-shift-
5552 by-register/immediate operation. Otherwise, this is just
5553 a logical operation. */
5558 /* Shift by immediate. */
5559 if (CONST_INT_P (XEXP (op0
, 1)))
5560 *cost
+= extra_cost
->alu
.log_shift
;
5562 *cost
+= extra_cost
->alu
.log_shift_reg
;
5565 *cost
+= extra_cost
->alu
.logical
;
5568 /* In both cases we want to cost both operands. */
5569 *cost
+= rtx_cost (new_op0
, (enum rtx_code
) code
, 0, speed
)
5570 + rtx_cost (op1
, (enum rtx_code
) code
, 1, speed
);
5580 *cost
+= extra_cost
->alu
.logical
;
5582 /* The logical instruction could have the shifted register form,
5583 but the cost is the same if the shift is processed as a separate
5584 instruction, so we don't bother with it here. */
5590 /* If a value is written in SI mode, then zero extended to DI
5591 mode, the operation will in general be free as a write to
5592 a 'w' register implicitly zeroes the upper bits of an 'x'
5593 register. However, if this is
5595 (set (reg) (zero_extend (reg)))
5597 we must cost the explicit register move. */
5599 && GET_MODE (op0
) == SImode
5602 int op_cost
= rtx_cost (XEXP (x
, 0), ZERO_EXTEND
, 0, speed
);
5604 if (!op_cost
&& speed
)
5606 *cost
+= extra_cost
->alu
.extend
;
5608 /* Free, the cost is that of the SI mode operation. */
5613 else if (MEM_P (XEXP (x
, 0)))
5615 /* All loads can zero extend to any size for free. */
5616 *cost
= rtx_cost (XEXP (x
, 0), ZERO_EXTEND
, param
, speed
);
5622 *cost
+= extra_cost
->alu
.extend
;
5627 if (MEM_P (XEXP (x
, 0)))
5632 rtx address
= XEXP (XEXP (x
, 0), 0);
5633 *cost
+= extra_cost
->ldst
.load_sign_extend
;
5636 COSTS_N_INSNS (aarch64_address_cost (address
, mode
,
5643 *cost
+= extra_cost
->alu
.extend
;
5650 if (CONST_INT_P (op1
))
	  /* LSL (immediate), UBFM, UBFIZ and friends.  These are all
5655 *cost
+= extra_cost
->alu
.shift
;
5657 /* We can incorporate zero/sign extend for free. */
5658 if (GET_CODE (op0
) == ZERO_EXTEND
5659 || GET_CODE (op0
) == SIGN_EXTEND
)
5660 op0
= XEXP (op0
, 0);
5662 *cost
+= rtx_cost (op0
, ASHIFT
, 0, speed
);
5669 *cost
+= extra_cost
->alu
.shift_reg
;
5671 return false; /* All arguments need to be in registers. */
5681 if (CONST_INT_P (op1
))
5683 /* ASR (immediate) and friends. */
5685 *cost
+= extra_cost
->alu
.shift
;
5687 *cost
+= rtx_cost (op0
, (enum rtx_code
) code
, 0, speed
);
5693 /* ASR (register) and friends. */
5695 *cost
+= extra_cost
->alu
.shift_reg
;
5697 return false; /* All arguments need to be in registers. */
5702 if (aarch64_cmodel
== AARCH64_CMODEL_LARGE
)
5706 *cost
+= extra_cost
->ldst
.load
;
5708 else if (aarch64_cmodel
== AARCH64_CMODEL_SMALL
5709 || aarch64_cmodel
== AARCH64_CMODEL_SMALL_PIC
)
5711 /* ADRP, followed by ADD. */
5712 *cost
+= COSTS_N_INSNS (1);
5714 *cost
+= 2 * extra_cost
->alu
.arith
;
5716 else if (aarch64_cmodel
== AARCH64_CMODEL_TINY
5717 || aarch64_cmodel
== AARCH64_CMODEL_TINY_PIC
)
5721 *cost
+= extra_cost
->alu
.arith
;
5726 /* One extra load instruction, after accessing the GOT. */
5727 *cost
+= COSTS_N_INSNS (1);
5729 *cost
+= extra_cost
->ldst
.load
;
5735 /* ADRP/ADD (immediate). */
5737 *cost
+= extra_cost
->alu
.arith
;
5744 *cost
+= extra_cost
->alu
.bfx
;
5746 /* We can trust that the immediates used will be correct (there
5747 are no by-register forms), so we need only cost op0. */
5748 *cost
+= rtx_cost (XEXP (x
, 0), (enum rtx_code
) code
, 0, speed
);
5752 *cost
+= aarch64_rtx_mult_cost (x
, MULT
, 0, speed
);
      /* aarch64_rtx_mult_cost always handles recursion to its
	 operands.  */
5761 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
)
5762 *cost
+= (extra_cost
->mult
[GET_MODE (x
) == DImode
].add
5763 + extra_cost
->mult
[GET_MODE (x
) == DImode
].idiv
);
5764 else if (GET_MODE (x
) == DFmode
)
5765 *cost
+= (extra_cost
->fp
[1].mult
5766 + extra_cost
->fp
[1].div
);
5767 else if (GET_MODE (x
) == SFmode
)
5768 *cost
+= (extra_cost
->fp
[0].mult
5769 + extra_cost
->fp
[0].div
);
5771 return false; /* All arguments need to be in registers. */
5778 if (GET_MODE_CLASS (mode
) == MODE_INT
)
	/* There is no integer SQRT, so only DIV and UDIV can get
	   here.  */
5781 *cost
+= extra_cost
->mult
[mode
== DImode
].idiv
;
5783 *cost
+= extra_cost
->fp
[mode
== DFmode
].div
;
5785 return false; /* All arguments need to be in registers. */
5788 return aarch64_if_then_else_costs (XEXP (x
, 0), XEXP (x
, 1),
5789 XEXP (x
, 2), cost
, speed
);
5802 return false; /* All arguments must be in registers. */
5810 *cost
+= extra_cost
->fp
[mode
== DFmode
].fma
;
5812 /* FMSUB, FNMADD, and FNMSUB are free. */
5813 if (GET_CODE (op0
) == NEG
)
5814 op0
= XEXP (op0
, 0);
5816 if (GET_CODE (op2
) == NEG
)
5817 op2
= XEXP (op2
, 0);
5819 /* aarch64_fnma4_elt_to_64v2df has the NEG as operand 1,
5820 and the by-element operand as operand 0. */
5821 if (GET_CODE (op1
) == NEG
)
5822 op1
= XEXP (op1
, 0);
5824 /* Catch vector-by-element operations. The by-element operand can
5825 either be (vec_duplicate (vec_select (x))) or just
5826 (vec_select (x)), depending on whether we are multiplying by
5827 a vector or a scalar.
5829 Canonicalization is not very good in these cases, FMA4 will put the
5830 by-element operand as operand 0, FNMA4 will have it as operand 1. */
5831 if (GET_CODE (op0
) == VEC_DUPLICATE
)
5832 op0
= XEXP (op0
, 0);
5833 else if (GET_CODE (op1
) == VEC_DUPLICATE
)
5834 op1
= XEXP (op1
, 0);
5836 if (GET_CODE (op0
) == VEC_SELECT
)
5837 op0
= XEXP (op0
, 0);
5838 else if (GET_CODE (op1
) == VEC_SELECT
)
5839 op1
= XEXP (op1
, 0);
5841 /* If the remaining parameters are not registers,
5842 get the cost to put them into registers. */
5843 *cost
+= rtx_cost (op0
, FMA
, 0, speed
);
5844 *cost
+= rtx_cost (op1
, FMA
, 1, speed
);
5845 *cost
+= rtx_cost (op2
, FMA
, 2, speed
);
5850 *cost
+= extra_cost
->fp
[mode
== DFmode
].widen
;
5853 case FLOAT_TRUNCATE
:
5855 *cost
+= extra_cost
->fp
[mode
== DFmode
].narrow
;
5861 /* Strip the rounding part. They will all be implemented
5862 by the fcvt* family of instructions anyway. */
5863 if (GET_CODE (x
) == UNSPEC
)
5865 unsigned int uns_code
= XINT (x
, 1);
5867 if (uns_code
== UNSPEC_FRINTA
5868 || uns_code
== UNSPEC_FRINTM
5869 || uns_code
== UNSPEC_FRINTN
5870 || uns_code
== UNSPEC_FRINTP
5871 || uns_code
== UNSPEC_FRINTZ
)
5872 x
= XVECEXP (x
, 0, 0);
5876 *cost
+= extra_cost
->fp
[GET_MODE (x
) == DFmode
].toint
;
5878 *cost
+= rtx_cost (x
, (enum rtx_code
) code
, 0, speed
);
5882 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5884 /* FABS and FNEG are analogous. */
5886 *cost
+= extra_cost
->fp
[mode
== DFmode
].neg
;
5890 /* Integer ABS will either be split to
5891 two arithmetic instructions, or will be an ABS
5892 (scalar), which we don't model. */
5893 *cost
= COSTS_N_INSNS (2);
5895 *cost
+= 2 * extra_cost
->alu
.arith
;
5903 /* FMAXNM/FMINNM/FMAX/FMIN.
5904 TODO: This may not be accurate for all implementations, but
5905 we do not model this in the cost tables. */
5906 *cost
+= extra_cost
->fp
[mode
== DFmode
].addsub
;
5911 /* The floating point round to integer frint* instructions. */
5912 if (aarch64_frint_unspec_p (XINT (x
, 1)))
5915 *cost
+= extra_cost
->fp
[mode
== DFmode
].roundint
;
5920 if (XINT (x
, 1) == UNSPEC_RBIT
)
5923 *cost
+= extra_cost
->alu
.rev
;
5931 /* Decompose <su>muldi3_highpart. */
5932 if (/* (truncate:DI */
5935 && GET_MODE (XEXP (x
, 0)) == TImode
5936 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
5938 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
5939 /* (ANY_EXTEND:TI (reg:DI))
5940 (ANY_EXTEND:TI (reg:DI))) */
5941 && ((GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
5942 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == ZERO_EXTEND
)
5943 || (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
5944 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == SIGN_EXTEND
))
5945 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x
, 0), 0), 0), 0)) == DImode
5946 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x
, 0), 0), 1), 0)) == DImode
5947 /* (const_int 64) */
5948 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
5949 && UINTVAL (XEXP (XEXP (x
, 0), 1)) == 64)
5953 *cost
+= extra_cost
->mult
[mode
== DImode
].extend
;
5954 *cost
+= rtx_cost (XEXP (XEXP (XEXP (XEXP (x
, 0), 0), 0), 0),
5956 *cost
+= rtx_cost (XEXP (XEXP (XEXP (XEXP (x
, 0), 0), 1), 0),
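	  /* Illustrative note, not part of the original source: this RTL
	     shape is what the middle end produces for a high-part multiply
	     such as (DImode) (((unsigned __int128) a * b) >> 64), which maps
	     to UMULH (SMULH for the sign-extended variant), so only the two
	     DImode operands underneath the extensions are costed here.  */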
5966 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5968 "\nFailed to cost RTX. Assuming default cost.\n");
/* Wrapper around aarch64_rtx_costs that dumps the partial or total cost
   calculated for X.  This cost is stored in *COST.  Returns true
   if the total cost of X was calculated.  */
static bool
aarch64_rtx_costs_wrapper (rtx x, int code, int outer,
			   int param, int *cost, bool speed)
{
  bool result = aarch64_rtx_costs (x, code, outer, param, cost, speed);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      print_rtl_single (dump_file, x);
      fprintf (dump_file, "\n%s cost: %d (%s)\n",
	       speed ? "Hot" : "Cold",
	       *cost, result ? "final" : "partial");
    }

  return result;
}
static int
aarch64_register_move_cost (enum machine_mode mode,
			    reg_class_t from_i, reg_class_t to_i)
{
  enum reg_class from = (enum reg_class) from_i;
  enum reg_class to = (enum reg_class) to_i;
  const struct cpu_regmove_cost *regmove_cost
    = aarch64_tune_params->regmove_cost;

  /* Moving between GPR and stack cost is the same as GP2GP.  */
  if ((from == GENERAL_REGS && to == STACK_REG)
      || (to == GENERAL_REGS && from == STACK_REG))
    return regmove_cost->GP2GP;

  /* To/From the stack register, we move via the gprs.  */
  if (to == STACK_REG || from == STACK_REG)
    return aarch64_register_move_cost (mode, from, GENERAL_REGS)
	    + aarch64_register_move_cost (mode, GENERAL_REGS, to);

  if (from == GENERAL_REGS && to == GENERAL_REGS)
    return regmove_cost->GP2GP;
  else if (from == GENERAL_REGS)
    return regmove_cost->GP2FP;
  else if (to == GENERAL_REGS)
    return regmove_cost->FP2GP;

  /* When AdvSIMD instructions are disabled it is not possible to move
     a 128-bit value directly between Q registers.  This is handled in
     secondary reload.  A general register is used as a scratch to move
     the upper DI value and the lower DI value is moved directly,
     hence the cost is the sum of three moves.  */
  if (! TARGET_SIMD && GET_MODE_SIZE (mode) == 128)
    return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;

  return regmove_cost->FP2FP;
}
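/* Illustrative example, not part of the original source (the numbers are
   hypothetical): if a tuning structure sets GP2FP = 5, FP2GP = 5 and
   FP2FP = 2, then a 128-bit FP-to-FP move with AdvSIMD disabled is costed
   as 5 + 5 + 2 = 12, reflecting the bounce of the upper 64 bits through a
   general-purpose scratch register.  */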
static int
aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
  return aarch64_tune_params->memmov_cost;
}

/* Return the number of instructions that can be issued per cycle.  */
static int
aarch64_sched_issue_rate (void)
{
  return aarch64_tune_params->issue_rate;
}
6045 /* Vectorizer cost model target hooks. */
6047 /* Implement targetm.vectorize.builtin_vectorization_cost. */
6049 aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost
,
6051 int misalign ATTRIBUTE_UNUSED
)
6055 switch (type_of_cost
)
6058 return aarch64_tune_params
->vec_costs
->scalar_stmt_cost
;
6061 return aarch64_tune_params
->vec_costs
->scalar_load_cost
;
6064 return aarch64_tune_params
->vec_costs
->scalar_store_cost
;
6067 return aarch64_tune_params
->vec_costs
->vec_stmt_cost
;
6070 return aarch64_tune_params
->vec_costs
->vec_align_load_cost
;
6073 return aarch64_tune_params
->vec_costs
->vec_store_cost
;
6076 return aarch64_tune_params
->vec_costs
->vec_to_scalar_cost
;
6079 return aarch64_tune_params
->vec_costs
->scalar_to_vec_cost
;
6081 case unaligned_load
:
6082 return aarch64_tune_params
->vec_costs
->vec_unalign_load_cost
;
6084 case unaligned_store
:
6085 return aarch64_tune_params
->vec_costs
->vec_unalign_store_cost
;
6087 case cond_branch_taken
:
6088 return aarch64_tune_params
->vec_costs
->cond_taken_branch_cost
;
6090 case cond_branch_not_taken
:
6091 return aarch64_tune_params
->vec_costs
->cond_not_taken_branch_cost
;
6094 case vec_promote_demote
:
6095 return aarch64_tune_params
->vec_costs
->vec_stmt_cost
;
6098 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
6099 return elements
/ 2 + 1;
6106 /* Implement targetm.vectorize.add_stmt_cost. */
6108 aarch64_add_stmt_cost (void *data
, int count
, enum vect_cost_for_stmt kind
,
6109 struct _stmt_vec_info
*stmt_info
, int misalign
,
6110 enum vect_cost_model_location where
)
6112 unsigned *cost
= (unsigned *) data
;
6113 unsigned retval
= 0;
6115 if (flag_vect_cost_model
)
6117 tree vectype
= stmt_info
? stmt_vectype (stmt_info
) : NULL_TREE
;
6119 aarch64_builtin_vectorization_cost (kind
, vectype
, misalign
);
6121 /* Statements in an inner loop relative to the loop being
6122 vectorized are weighted more heavily. The value here is
6123 a function (linear for now) of the loop nest level. */
6124 if (where
== vect_body
&& stmt_info
&& stmt_in_inner_loop_p (stmt_info
))
6126 loop_vec_info loop_info
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6127 struct loop
*loop
= LOOP_VINFO_LOOP (loop_info
);
6128 unsigned nest_level
= loop_depth (loop
);
6130 count
*= nest_level
;
6133 retval
= (unsigned) (count
* stmt_cost
);
6134 cost
[where
] += retval
;
6140 static void initialize_aarch64_code_model (void);
6142 /* Parse the architecture extension string. */
6145 aarch64_parse_extension (char *str
)
6147 /* The extension string is parsed left to right. */
6148 const struct aarch64_option_extension
*opt
= NULL
;
6150 /* Flag to say whether we are adding or removing an extension. */
6151 int adding_ext
= -1;
6153 while (str
!= NULL
&& *str
!= 0)
6159 ext
= strchr (str
, '+');
6166 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
6177 error ("missing feature modifier after %qs", "+no");
6181 /* Scan over the extensions table trying to find an exact match. */
6182 for (opt
= all_extensions
; opt
->name
!= NULL
; opt
++)
6184 if (strlen (opt
->name
) == len
&& strncmp (opt
->name
, str
, len
) == 0)
6186 /* Add or remove the extension. */
6188 aarch64_isa_flags
|= opt
->flags_on
;
6190 aarch64_isa_flags
&= ~(opt
->flags_off
);
6195 if (opt
->name
== NULL
)
6197 /* Extension not found in list. */
6198 error ("unknown feature modifier %qs", str
);
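/* Illustrative example, not part of the original source (the extension names
   are only examples): for the feature string "crc+nocrypto" the loop above
   first matches "crc" with ADDING_EXT set and ORs its flags_on bits into
   aarch64_isa_flags; the next iteration sees the "no" prefix, matches
   "crypto" and clears the bits in its flags_off mask.  An unrecognised name
   such as "+foo" falls through to the opt->name == NULL check and is
   reported as an unknown feature modifier.  */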
6208 /* Parse the ARCH string. */
6211 aarch64_parse_arch (void)
6214 const struct processor
*arch
;
6215 char *str
= (char *) alloca (strlen (aarch64_arch_string
) + 1);
6218 strcpy (str
, aarch64_arch_string
);
6220 ext
= strchr (str
, '+');
6229 error ("missing arch name in -march=%qs", str
);
6233 /* Loop through the list of supported ARCHs to find a match. */
6234 for (arch
= all_architectures
; arch
->name
!= NULL
; arch
++)
6236 if (strlen (arch
->name
) == len
&& strncmp (arch
->name
, str
, len
) == 0)
6238 selected_arch
= arch
;
6239 aarch64_isa_flags
= selected_arch
->flags
;
6242 selected_cpu
= &all_cores
[selected_arch
->core
];
6246 /* ARCH string contains at least one extension. */
6247 aarch64_parse_extension (ext
);
6250 if (strcmp (selected_arch
->arch
, selected_cpu
->arch
))
6252 warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
6253 selected_cpu
->name
, selected_arch
->name
);
6260 /* ARCH name not found in list. */
6261 error ("unknown value %qs for -march", str
);
6265 /* Parse the CPU string. */
6268 aarch64_parse_cpu (void)
6271 const struct processor
*cpu
;
6272 char *str
= (char *) alloca (strlen (aarch64_cpu_string
) + 1);
6275 strcpy (str
, aarch64_cpu_string
);
6277 ext
= strchr (str
, '+');
6286 error ("missing cpu name in -mcpu=%qs", str
);
6290 /* Loop through the list of supported CPUs to find a match. */
6291 for (cpu
= all_cores
; cpu
->name
!= NULL
; cpu
++)
6293 if (strlen (cpu
->name
) == len
&& strncmp (cpu
->name
, str
, len
) == 0)
6296 selected_tune
= cpu
;
6297 aarch64_isa_flags
= selected_cpu
->flags
;
6301 /* CPU string contains at least one extension. */
6302 aarch64_parse_extension (ext
);
6309 /* CPU name not found in list. */
6310 error ("unknown value %qs for -mcpu", str
);
6314 /* Parse the TUNE string. */
6317 aarch64_parse_tune (void)
6319 const struct processor
*cpu
;
6320 char *str
= (char *) alloca (strlen (aarch64_tune_string
) + 1);
6321 strcpy (str
, aarch64_tune_string
);
6323 /* Loop through the list of supported CPUs to find a match. */
6324 for (cpu
= all_cores
; cpu
->name
!= NULL
; cpu
++)
6326 if (strcmp (cpu
->name
, str
) == 0)
6328 selected_tune
= cpu
;
6333 /* CPU name not found in list. */
6334 error ("unknown value %qs for -mtune", str
);
6339 /* Implement TARGET_OPTION_OVERRIDE. */
6342 aarch64_override_options (void)
6344 /* -mcpu=CPU is shorthand for -march=ARCH_FOR_CPU, -mtune=CPU.
6345 If either of -march or -mtune is given, they override their
6346 respective component of -mcpu.
6348 So, first parse AARCH64_CPU_STRING, then the others, be careful
6349 with -march as, if -mcpu is not present on the command line, march
6350 must set a sensible default CPU. */
6351 if (aarch64_cpu_string
)
6353 aarch64_parse_cpu ();
6356 if (aarch64_arch_string
)
6358 aarch64_parse_arch ();
6361 if (aarch64_tune_string
)
6363 aarch64_parse_tune ();
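  /* Illustrative example, not part of the original source (the core names
     are only examples): with "-mcpu=cortex-a57 -mtune=cortex-a53" the CPU
     string is parsed first, selecting cortex-a57 together with its ISA flags
     and tuning; the explicit -mtune is parsed afterwards and overrides only
     the tuning component, leaving the ISA flags derived from cortex-a57
     intact.  */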
6366 #ifndef HAVE_AS_MABI_OPTION
  /* The compiler may have been configured with 2.23.* binutils, which does
     not support ILP32.  */
  if (TARGET_ILP32)
    error ("Assembler does not support -mabi=ilp32");
6373 initialize_aarch64_code_model ();
6375 aarch64_build_bitmask_table ();
6377 /* This target defaults to strict volatile bitfields. */
6378 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least (2))
6379 flag_strict_volatile_bitfields
= 1;
6381 /* If the user did not specify a processor, choose the default
6382 one for them. This will be the CPU set during configuration using
6383 --with-cpu, otherwise it is "generic". */
6386 selected_cpu
= &all_cores
[TARGET_CPU_DEFAULT
& 0x3f];
6387 aarch64_isa_flags
= TARGET_CPU_DEFAULT
>> 6;
6390 gcc_assert (selected_cpu
);
  /* The selected cpu may be an architecture, so look up tuning by core ID.  */
6394 selected_tune
= &all_cores
[selected_cpu
->core
];
6396 aarch64_tune_flags
= selected_tune
->flags
;
6397 aarch64_tune
= selected_tune
->core
;
6398 aarch64_tune_params
= selected_tune
->tune
;
6400 aarch64_override_options_after_change ();
6403 /* Implement targetm.override_options_after_change. */
static void
aarch64_override_options_after_change (void)
{
  if (flag_omit_frame_pointer)
    flag_omit_leaf_frame_pointer = false;
  else if (flag_omit_leaf_frame_pointer)
    flag_omit_frame_pointer = true;
}
static struct machine_function *
aarch64_init_machine_status (void)
{
  struct machine_function *machine;
  machine = ggc_cleared_alloc<machine_function> ();
  return machine;
}

void
aarch64_init_expanders (void)
{
  init_machine_status = aarch64_init_machine_status;
}
/* A checking mechanism for the implementation of the various code models.  */
static void
initialize_aarch64_code_model (void)
{
  if (flag_pic)
    {
      switch (aarch64_cmodel_var)
	{
	case AARCH64_CMODEL_TINY:
	  aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
	  break;
	case AARCH64_CMODEL_SMALL:
	  aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
	  break;
	case AARCH64_CMODEL_LARGE:
	  sorry ("code model %qs with -f%s", "large",
		 flag_pic > 1 ? "PIC" : "pic");
	default:
	  gcc_unreachable ();
	}
    }
  else
    aarch64_cmodel = aarch64_cmodel_var;
}
/* Return true if SYMBOL_REF X binds locally.  */
static bool
aarch64_symbol_binds_local_p (const_rtx x)
{
  return (SYMBOL_REF_DECL (x)
	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	  : SYMBOL_REF_LOCAL_P (x));
}

/* Return true if SYMBOL_REF X is thread-local.  */
static bool
aarch64_tls_symbol_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) != SYMBOL_REF)
    return false;

  return SYMBOL_REF_TLS_MODEL (x) != 0;
}
/* Classify a TLS symbol into one of the TLS kinds.  */
enum aarch64_symbol_type
aarch64_classify_tls_symbol (rtx x)
{
  enum tls_model tls_kind = tls_symbolic_operand_type (x);

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
    case TLS_MODEL_LOCAL_DYNAMIC:
      return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;

    case TLS_MODEL_INITIAL_EXEC:
      return SYMBOL_SMALL_GOTTPREL;

    case TLS_MODEL_LOCAL_EXEC:
      return SYMBOL_SMALL_TPREL;

    case TLS_MODEL_EMULATED:
    case TLS_MODEL_NONE:
      return SYMBOL_FORCE_TO_MEM;

    default:
      gcc_unreachable ();
    }
}
6503 /* Return the method that should be used to access SYMBOL_REF or
6504 LABEL_REF X in context CONTEXT. */
6506 enum aarch64_symbol_type
6507 aarch64_classify_symbol (rtx x
,
6508 enum aarch64_symbol_context context ATTRIBUTE_UNUSED
)
6510 if (GET_CODE (x
) == LABEL_REF
)
6512 switch (aarch64_cmodel
)
6514 case AARCH64_CMODEL_LARGE
:
6515 return SYMBOL_FORCE_TO_MEM
;
6517 case AARCH64_CMODEL_TINY_PIC
:
6518 case AARCH64_CMODEL_TINY
:
6519 return SYMBOL_TINY_ABSOLUTE
;
6521 case AARCH64_CMODEL_SMALL_PIC
:
6522 case AARCH64_CMODEL_SMALL
:
6523 return SYMBOL_SMALL_ABSOLUTE
;
6530 if (GET_CODE (x
) == SYMBOL_REF
)
6532 if (aarch64_cmodel
== AARCH64_CMODEL_LARGE
)
6533 return SYMBOL_FORCE_TO_MEM
;
6535 if (aarch64_tls_symbol_p (x
))
6536 return aarch64_classify_tls_symbol (x
);
6538 switch (aarch64_cmodel
)
6540 case AARCH64_CMODEL_TINY
:
6541 if (SYMBOL_REF_WEAK (x
))
6542 return SYMBOL_FORCE_TO_MEM
;
6543 return SYMBOL_TINY_ABSOLUTE
;
6545 case AARCH64_CMODEL_SMALL
:
6546 if (SYMBOL_REF_WEAK (x
))
6547 return SYMBOL_FORCE_TO_MEM
;
6548 return SYMBOL_SMALL_ABSOLUTE
;
6550 case AARCH64_CMODEL_TINY_PIC
:
6551 if (!aarch64_symbol_binds_local_p (x
))
6552 return SYMBOL_TINY_GOT
;
6553 return SYMBOL_TINY_ABSOLUTE
;
6555 case AARCH64_CMODEL_SMALL_PIC
:
6556 if (!aarch64_symbol_binds_local_p (x
))
6557 return SYMBOL_SMALL_GOT
;
6558 return SYMBOL_SMALL_ABSOLUTE
;
6565 /* By default push everything into the constant pool. */
6566 return SYMBOL_FORCE_TO_MEM
;
6570 aarch64_constant_address_p (rtx x
)
6572 return (CONSTANT_P (x
) && memory_address_p (DImode
, x
));
6576 aarch64_legitimate_pic_operand_p (rtx x
)
6578 if (GET_CODE (x
) == SYMBOL_REF
6579 || (GET_CODE (x
) == CONST
6580 && GET_CODE (XEXP (x
, 0)) == PLUS
6581 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
))
6587 /* Return true if X holds either a quarter-precision or
6588 floating-point +0.0 constant. */
6590 aarch64_valid_floating_const (enum machine_mode mode
, rtx x
)
6592 if (!CONST_DOUBLE_P (x
))
  /* TODO: We could handle moving 0.0 to a TFmode register,
     but first we would like to refactor the movtf_aarch64
     to be more amenable to splitting moves properly and to
     correctly gate on TARGET_SIMD.  For now, reject all
     constants which are not destined for SFmode or DFmode registers.  */
6600 if (!(mode
== SFmode
|| mode
== DFmode
))
6603 if (aarch64_float_const_zero_rtx_p (x
))
6605 return aarch64_float_const_representable_p (x
);
6609 aarch64_legitimate_constant_p (enum machine_mode mode
, rtx x
)
6611 /* Do not allow vector struct mode constants. We could support
6612 0 and -1 easily, but they need support in aarch64-simd.md. */
6613 if (TARGET_SIMD
&& aarch64_vect_struct_mode_p (mode
))
6616 /* This could probably go away because
6617 we now decompose CONST_INTs according to expand_mov_immediate. */
6618 if ((GET_CODE (x
) == CONST_VECTOR
6619 && aarch64_simd_valid_immediate (x
, mode
, false, NULL
))
6620 || CONST_INT_P (x
) || aarch64_valid_floating_const (mode
, x
))
6621 return !targetm
.cannot_force_const_mem (mode
, x
);
6623 if (GET_CODE (x
) == HIGH
6624 && aarch64_valid_symref (XEXP (x
, 0), GET_MODE (XEXP (x
, 0))))
6627 return aarch64_constant_address_p (x
);
6631 aarch64_load_tp (rtx target
)
6634 || GET_MODE (target
) != Pmode
6635 || !register_operand (target
, Pmode
))
6636 target
= gen_reg_rtx (Pmode
);
6638 /* Can return in any reg. */
6639 emit_insn (gen_aarch64_load_tp_hard (target
));
6643 /* On AAPCS systems, this is the "struct __va_list". */
6644 static GTY(()) tree va_list_type
;
/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
   Return the type to use as __builtin_va_list.

   AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type
   defined as:

     struct {
       void *__stack;
       void *__gr_top;
       void *__vr_top;
       int   __gr_offs;
       int   __vr_offs;
     };  */

static tree
aarch64_build_builtin_va_list (void)
{
  tree va_list_name;
  tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
6666 /* Create the type. */
6667 va_list_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
6668 /* Give it the required name. */
6669 va_list_name
= build_decl (BUILTINS_LOCATION
,
6671 get_identifier ("__va_list"),
6673 DECL_ARTIFICIAL (va_list_name
) = 1;
6674 TYPE_NAME (va_list_type
) = va_list_name
;
6675 TYPE_STUB_DECL (va_list_type
) = va_list_name
;
6677 /* Create the fields. */
6678 f_stack
= build_decl (BUILTINS_LOCATION
,
6679 FIELD_DECL
, get_identifier ("__stack"),
6681 f_grtop
= build_decl (BUILTINS_LOCATION
,
6682 FIELD_DECL
, get_identifier ("__gr_top"),
6684 f_vrtop
= build_decl (BUILTINS_LOCATION
,
6685 FIELD_DECL
, get_identifier ("__vr_top"),
6687 f_groff
= build_decl (BUILTINS_LOCATION
,
6688 FIELD_DECL
, get_identifier ("__gr_offs"),
6690 f_vroff
= build_decl (BUILTINS_LOCATION
,
6691 FIELD_DECL
, get_identifier ("__vr_offs"),
6694 DECL_ARTIFICIAL (f_stack
) = 1;
6695 DECL_ARTIFICIAL (f_grtop
) = 1;
6696 DECL_ARTIFICIAL (f_vrtop
) = 1;
6697 DECL_ARTIFICIAL (f_groff
) = 1;
6698 DECL_ARTIFICIAL (f_vroff
) = 1;
6700 DECL_FIELD_CONTEXT (f_stack
) = va_list_type
;
6701 DECL_FIELD_CONTEXT (f_grtop
) = va_list_type
;
6702 DECL_FIELD_CONTEXT (f_vrtop
) = va_list_type
;
6703 DECL_FIELD_CONTEXT (f_groff
) = va_list_type
;
6704 DECL_FIELD_CONTEXT (f_vroff
) = va_list_type
;
6706 TYPE_FIELDS (va_list_type
) = f_stack
;
6707 DECL_CHAIN (f_stack
) = f_grtop
;
6708 DECL_CHAIN (f_grtop
) = f_vrtop
;
6709 DECL_CHAIN (f_vrtop
) = f_groff
;
6710 DECL_CHAIN (f_groff
) = f_vroff
;
6712 /* Compute its layout. */
6713 layout_type (va_list_type
);
6715 return va_list_type
;
6718 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
6720 aarch64_expand_builtin_va_start (tree valist
, rtx nextarg ATTRIBUTE_UNUSED
)
6722 const CUMULATIVE_ARGS
*cum
;
6723 tree f_stack
, f_grtop
, f_vrtop
, f_groff
, f_vroff
;
6724 tree stack
, grtop
, vrtop
, groff
, vroff
;
6726 int gr_save_area_size
;
6727 int vr_save_area_size
;
6730 cum
= &crtl
->args
.info
;
6732 = (NUM_ARG_REGS
- cum
->aapcs_ncrn
) * UNITS_PER_WORD
;
6734 = (NUM_FP_ARG_REGS
- cum
->aapcs_nvrn
) * UNITS_PER_VREG
;
6736 if (TARGET_GENERAL_REGS_ONLY
)
6738 if (cum
->aapcs_nvrn
> 0)
6739 sorry ("%qs and floating point or vector arguments",
6740 "-mgeneral-regs-only");
6741 vr_save_area_size
= 0;
6744 f_stack
= TYPE_FIELDS (va_list_type_node
);
6745 f_grtop
= DECL_CHAIN (f_stack
);
6746 f_vrtop
= DECL_CHAIN (f_grtop
);
6747 f_groff
= DECL_CHAIN (f_vrtop
);
6748 f_vroff
= DECL_CHAIN (f_groff
);
6750 stack
= build3 (COMPONENT_REF
, TREE_TYPE (f_stack
), valist
, f_stack
,
6752 grtop
= build3 (COMPONENT_REF
, TREE_TYPE (f_grtop
), valist
, f_grtop
,
6754 vrtop
= build3 (COMPONENT_REF
, TREE_TYPE (f_vrtop
), valist
, f_vrtop
,
6756 groff
= build3 (COMPONENT_REF
, TREE_TYPE (f_groff
), valist
, f_groff
,
6758 vroff
= build3 (COMPONENT_REF
, TREE_TYPE (f_vroff
), valist
, f_vroff
,
6761 /* Emit code to initialize STACK, which points to the next varargs stack
6762 argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
6763 by named arguments. STACK is 8-byte aligned. */
6764 t
= make_tree (TREE_TYPE (stack
), virtual_incoming_args_rtx
);
6765 if (cum
->aapcs_stack_size
> 0)
6766 t
= fold_build_pointer_plus_hwi (t
, cum
->aapcs_stack_size
* UNITS_PER_WORD
);
6767 t
= build2 (MODIFY_EXPR
, TREE_TYPE (stack
), stack
, t
);
6768 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6770 /* Emit code to initialize GRTOP, the top of the GR save area.
6771 virtual_incoming_args_rtx should have been 16 byte aligned. */
6772 t
= make_tree (TREE_TYPE (grtop
), virtual_incoming_args_rtx
);
6773 t
= build2 (MODIFY_EXPR
, TREE_TYPE (grtop
), grtop
, t
);
6774 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6776 /* Emit code to initialize VRTOP, the top of the VR save area.
6777 This address is gr_save_area_bytes below GRTOP, rounded
6778 down to the next 16-byte boundary. */
6779 t
= make_tree (TREE_TYPE (vrtop
), virtual_incoming_args_rtx
);
6780 vr_offset
= AARCH64_ROUND_UP (gr_save_area_size
,
6781 STACK_BOUNDARY
/ BITS_PER_UNIT
);
6784 t
= fold_build_pointer_plus_hwi (t
, -vr_offset
);
6785 t
= build2 (MODIFY_EXPR
, TREE_TYPE (vrtop
), vrtop
, t
);
6786 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6788 /* Emit code to initialize GROFF, the offset from GRTOP of the
6789 next GPR argument. */
6790 t
= build2 (MODIFY_EXPR
, TREE_TYPE (groff
), groff
,
6791 build_int_cst (TREE_TYPE (groff
), -gr_save_area_size
));
6792 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
  /* Likewise emit code to initialize VROFF, the offset from VRTOP
     of the next VR argument.  */
6796 t
= build2 (MODIFY_EXPR
, TREE_TYPE (vroff
), vroff
,
6797 build_int_cst (TREE_TYPE (vroff
), -vr_save_area_size
));
6798 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
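  /* Illustrative example, not part of the original source (assuming the
     usual eight GP and eight FP/SIMD argument registers): if the named
     arguments of a variadic callee consume three GP and one FP register,
     then gr_save_area_size is 5 * 8 == 40 bytes and vr_save_area_size is
     7 * 16 == 112 bytes, so __gr_offs starts at -40 and __vr_offs at -112;
     both offsets move towards zero as va_arg consumes saved registers.  */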
6801 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
6804 aarch64_gimplify_va_arg_expr (tree valist
, tree type
, gimple_seq
*pre_p
,
6805 gimple_seq
*post_p ATTRIBUTE_UNUSED
)
6809 bool is_ha
; /* is HFA or HVA. */
6810 bool dw_align
; /* double-word align. */
6811 enum machine_mode ag_mode
= VOIDmode
;
6813 enum machine_mode mode
;
6815 tree f_stack
, f_grtop
, f_vrtop
, f_groff
, f_vroff
;
6816 tree stack
, f_top
, f_off
, off
, arg
, roundup
, on_stack
;
6817 HOST_WIDE_INT size
, rsize
, adjust
, align
;
6818 tree t
, u
, cond1
, cond2
;
6820 indirect_p
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, false);
6822 type
= build_pointer_type (type
);
6824 mode
= TYPE_MODE (type
);
6826 f_stack
= TYPE_FIELDS (va_list_type_node
);
6827 f_grtop
= DECL_CHAIN (f_stack
);
6828 f_vrtop
= DECL_CHAIN (f_grtop
);
6829 f_groff
= DECL_CHAIN (f_vrtop
);
6830 f_vroff
= DECL_CHAIN (f_groff
);
6832 stack
= build3 (COMPONENT_REF
, TREE_TYPE (f_stack
), unshare_expr (valist
),
6833 f_stack
, NULL_TREE
);
6834 size
= int_size_in_bytes (type
);
6835 align
= aarch64_function_arg_alignment (mode
, type
) / BITS_PER_UNIT
;
6839 if (aarch64_vfp_is_call_or_return_candidate (mode
,
6845 /* TYPE passed in fp/simd registers. */
6846 if (TARGET_GENERAL_REGS_ONLY
)
6847 sorry ("%qs and floating point or vector arguments",
6848 "-mgeneral-regs-only");
6850 f_top
= build3 (COMPONENT_REF
, TREE_TYPE (f_vrtop
),
6851 unshare_expr (valist
), f_vrtop
, NULL_TREE
);
6852 f_off
= build3 (COMPONENT_REF
, TREE_TYPE (f_vroff
),
6853 unshare_expr (valist
), f_vroff
, NULL_TREE
);
6855 rsize
= nregs
* UNITS_PER_VREG
;
6859 if (BYTES_BIG_ENDIAN
&& GET_MODE_SIZE (ag_mode
) < UNITS_PER_VREG
)
6860 adjust
= UNITS_PER_VREG
- GET_MODE_SIZE (ag_mode
);
6862 else if (BLOCK_REG_PADDING (mode
, type
, 1) == downward
6863 && size
< UNITS_PER_VREG
)
6865 adjust
= UNITS_PER_VREG
- size
;
6870 /* TYPE passed in general registers. */
6871 f_top
= build3 (COMPONENT_REF
, TREE_TYPE (f_grtop
),
6872 unshare_expr (valist
), f_grtop
, NULL_TREE
);
6873 f_off
= build3 (COMPONENT_REF
, TREE_TYPE (f_groff
),
6874 unshare_expr (valist
), f_groff
, NULL_TREE
);
6875 rsize
= (size
+ UNITS_PER_WORD
- 1) & -UNITS_PER_WORD
;
6876 nregs
= rsize
/ UNITS_PER_WORD
;
6881 if (BLOCK_REG_PADDING (mode
, type
, 1) == downward
6882 && size
< UNITS_PER_WORD
)
6884 adjust
= UNITS_PER_WORD
- size
;
6888 /* Get a local temporary for the field value. */
6889 off
= get_initialized_tmp_var (f_off
, pre_p
, NULL
);
6891 /* Emit code to branch if off >= 0. */
6892 t
= build2 (GE_EXPR
, boolean_type_node
, off
,
6893 build_int_cst (TREE_TYPE (off
), 0));
6894 cond1
= build3 (COND_EXPR
, ptr_type_node
, t
, NULL_TREE
, NULL_TREE
);
6898 /* Emit: offs = (offs + 15) & -16. */
6899 t
= build2 (PLUS_EXPR
, TREE_TYPE (off
), off
,
6900 build_int_cst (TREE_TYPE (off
), 15));
6901 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (off
), t
,
6902 build_int_cst (TREE_TYPE (off
), -16));
6903 roundup
= build2 (MODIFY_EXPR
, TREE_TYPE (off
), off
, t
);
6908 /* Update ap.__[g|v]r_offs */
6909 t
= build2 (PLUS_EXPR
, TREE_TYPE (off
), off
,
6910 build_int_cst (TREE_TYPE (off
), rsize
));
6911 t
= build2 (MODIFY_EXPR
, TREE_TYPE (f_off
), unshare_expr (f_off
), t
);
6915 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), roundup
, t
);
6917 /* [cond2] if (ap.__[g|v]r_offs > 0) */
6918 u
= build2 (GT_EXPR
, boolean_type_node
, unshare_expr (f_off
),
6919 build_int_cst (TREE_TYPE (f_off
), 0));
6920 cond2
= build3 (COND_EXPR
, ptr_type_node
, u
, NULL_TREE
, NULL_TREE
);
6922 /* String up: make sure the assignment happens before the use. */
6923 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (cond2
), t
, cond2
);
6924 COND_EXPR_ELSE (cond1
) = t
;
  /* Prepare the trees handling the argument that is passed on the stack;
     the top-level node will be stored in ON_STACK.  */
6928 arg
= get_initialized_tmp_var (stack
, pre_p
, NULL
);
6931 /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
6932 t
= fold_convert (intDI_type_node
, arg
);
6933 t
= build2 (PLUS_EXPR
, TREE_TYPE (t
), t
,
6934 build_int_cst (TREE_TYPE (t
), 15));
6935 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
6936 build_int_cst (TREE_TYPE (t
), -16));
6937 t
= fold_convert (TREE_TYPE (arg
), t
);
6938 roundup
= build2 (MODIFY_EXPR
, TREE_TYPE (arg
), arg
, t
);
6942 /* Advance ap.__stack */
6943 t
= fold_convert (intDI_type_node
, arg
);
6944 t
= build2 (PLUS_EXPR
, TREE_TYPE (t
), t
,
6945 build_int_cst (TREE_TYPE (t
), size
+ 7));
6946 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
6947 build_int_cst (TREE_TYPE (t
), -8));
6948 t
= fold_convert (TREE_TYPE (arg
), t
);
6949 t
= build2 (MODIFY_EXPR
, TREE_TYPE (stack
), unshare_expr (stack
), t
);
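  /* Illustrative example, not part of the original source: for a 12-byte
     argument taken from the stack, the expression built above sets
     ap.__stack to (old + 12 + 7) & -8, i.e. 16 bytes past an 8-byte-aligned
     starting value, so the next stack slot stays 8-byte aligned.  */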
6950 /* String up roundup and advance. */
6952 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), roundup
, t
);
6953 /* String up with arg */
6954 on_stack
= build2 (COMPOUND_EXPR
, TREE_TYPE (arg
), t
, arg
);
6955 /* Big-endianness related address adjustment. */
6956 if (BLOCK_REG_PADDING (mode
, type
, 1) == downward
6957 && size
< UNITS_PER_WORD
)
6959 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (arg
), arg
,
6960 size_int (UNITS_PER_WORD
- size
));
6961 on_stack
= build2 (COMPOUND_EXPR
, TREE_TYPE (arg
), on_stack
, t
);
6964 COND_EXPR_THEN (cond1
) = unshare_expr (on_stack
);
6965 COND_EXPR_THEN (cond2
) = unshare_expr (on_stack
);
6967 /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
6970 t
= build2 (PREINCREMENT_EXPR
, TREE_TYPE (off
), off
,
6971 build_int_cst (TREE_TYPE (off
), adjust
));
6973 t
= fold_convert (sizetype
, t
);
6974 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (f_top
), f_top
, t
);
6978 /* type ha; // treat as "struct {ftype field[n];}"
6979 ... [computing offs]
6980 for (i = 0; i <nregs; ++i, offs += 16)
6981 ha.field[i] = *((ftype *)(ap.__vr_top + offs));
6984 tree tmp_ha
, field_t
, field_ptr_t
;
6986 /* Declare a local variable. */
6987 tmp_ha
= create_tmp_var_raw (type
, "ha");
6988 gimple_add_tmp_var (tmp_ha
);
6990 /* Establish the base type. */
6994 field_t
= float_type_node
;
6995 field_ptr_t
= float_ptr_type_node
;
6998 field_t
= double_type_node
;
6999 field_ptr_t
= double_ptr_type_node
;
7002 field_t
= long_double_type_node
;
7003 field_ptr_t
= long_double_ptr_type_node
;
7005 /* The half precision and quad precision are not fully supported yet. Enable
7006 the following code after the support is complete. Need to find the correct
7007 type node for __fp16 *. */
7010 field_t
= float_type_node
;
7011 field_ptr_t
= float_ptr_type_node
;
7017 tree innertype
= make_signed_type (GET_MODE_PRECISION (SImode
));
7018 field_t
= build_vector_type_for_mode (innertype
, ag_mode
);
7019 field_ptr_t
= build_pointer_type (field_t
);
7026 /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area */
7027 tmp_ha
= build1 (ADDR_EXPR
, field_ptr_t
, tmp_ha
);
7029 t
= fold_convert (field_ptr_t
, addr
);
7030 t
= build2 (MODIFY_EXPR
, field_t
,
7031 build1 (INDIRECT_REF
, field_t
, tmp_ha
),
7032 build1 (INDIRECT_REF
, field_t
, t
));
7034 /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
7035 for (i
= 1; i
< nregs
; ++i
)
7037 addr
= fold_build_pointer_plus_hwi (addr
, UNITS_PER_VREG
);
7038 u
= fold_convert (field_ptr_t
, addr
);
7039 u
= build2 (MODIFY_EXPR
, field_t
,
7040 build2 (MEM_REF
, field_t
, tmp_ha
,
7041 build_int_cst (field_ptr_t
,
7043 int_size_in_bytes (field_t
)))),
7044 build1 (INDIRECT_REF
, field_t
, u
));
7045 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), t
, u
);
7048 u
= fold_convert (TREE_TYPE (f_top
), tmp_ha
);
7049 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (f_top
), t
, u
);
7052 COND_EXPR_ELSE (cond2
) = t
;
7053 addr
= fold_convert (build_pointer_type (type
), cond1
);
7054 addr
= build_va_arg_indirect_ref (addr
);
7057 addr
= build_va_arg_indirect_ref (addr
);
7062 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
7065 aarch64_setup_incoming_varargs (cumulative_args_t cum_v
, enum machine_mode mode
,
7066 tree type
, int *pretend_size ATTRIBUTE_UNUSED
,
7069 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
7070 CUMULATIVE_ARGS local_cum
;
7071 int gr_saved
, vr_saved
;
7073 /* The caller has advanced CUM up to, but not beyond, the last named
7074 argument. Advance a local copy of CUM past the last "real" named
7075 argument, to find out how many registers are left over. */
7077 aarch64_function_arg_advance (pack_cumulative_args(&local_cum
), mode
, type
, true);
  /* Find out how many registers we need to save.  */
7080 gr_saved
= NUM_ARG_REGS
- local_cum
.aapcs_ncrn
;
7081 vr_saved
= NUM_FP_ARG_REGS
- local_cum
.aapcs_nvrn
;
7083 if (TARGET_GENERAL_REGS_ONLY
)
7085 if (local_cum
.aapcs_nvrn
> 0)
7086 sorry ("%qs and floating point or vector arguments",
7087 "-mgeneral-regs-only");
7097 /* virtual_incoming_args_rtx should have been 16-byte aligned. */
7098 ptr
= plus_constant (Pmode
, virtual_incoming_args_rtx
,
7099 - gr_saved
* UNITS_PER_WORD
);
7100 mem
= gen_frame_mem (BLKmode
, ptr
);
7101 set_mem_alias_set (mem
, get_varargs_alias_set ());
7103 move_block_from_reg (local_cum
.aapcs_ncrn
+ R0_REGNUM
,
7108 /* We can't use move_block_from_reg, because it will use
7109 the wrong mode, storing D regs only. */
7110 enum machine_mode mode
= TImode
;
7113 /* Set OFF to the offset from virtual_incoming_args_rtx of
7114 the first vector register. The VR save area lies below
7115 the GR one, and is aligned to 16 bytes. */
7116 off
= -AARCH64_ROUND_UP (gr_saved
* UNITS_PER_WORD
,
7117 STACK_BOUNDARY
/ BITS_PER_UNIT
);
7118 off
-= vr_saved
* UNITS_PER_VREG
;
7120 for (i
= local_cum
.aapcs_nvrn
; i
< NUM_FP_ARG_REGS
; ++i
)
7124 ptr
= plus_constant (Pmode
, virtual_incoming_args_rtx
, off
);
7125 mem
= gen_frame_mem (mode
, ptr
);
7126 set_mem_alias_set (mem
, get_varargs_alias_set ());
7127 aarch64_emit_move (mem
, gen_rtx_REG (mode
, V0_REGNUM
+ i
));
7128 off
+= UNITS_PER_VREG
;
7133 /* We don't save the size into *PRETEND_SIZE because we want to avoid
7134 any complication of having crtl->args.pretend_args_size changed. */
7135 cfun
->machine
->frame
.saved_varargs_size
7136 = (AARCH64_ROUND_UP (gr_saved
* UNITS_PER_WORD
,
7137 STACK_BOUNDARY
/ BITS_PER_UNIT
)
7138 + vr_saved
* UNITS_PER_VREG
);
7142 aarch64_conditional_register_usage (void)
7147 for (i
= V0_REGNUM
; i
<= V31_REGNUM
; i
++)
7150 call_used_regs
[i
] = 1;
7155 /* Walk down the type tree of TYPE counting consecutive base elements.
7156 If *MODEP is VOIDmode, then set it to the first valid floating point
7157 type. If a non-floating point type is found, or if a floating point
7158 type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
7159 otherwise return the count in the sub-tree. */
7161 aapcs_vfp_sub_candidate (const_tree type
, enum machine_mode
*modep
)
7163 enum machine_mode mode
;
7166 switch (TREE_CODE (type
))
7169 mode
= TYPE_MODE (type
);
7170 if (mode
!= DFmode
&& mode
!= SFmode
&& mode
!= TFmode
)
7173 if (*modep
== VOIDmode
)
7182 mode
= TYPE_MODE (TREE_TYPE (type
));
7183 if (mode
!= DFmode
&& mode
!= SFmode
&& mode
!= TFmode
)
7186 if (*modep
== VOIDmode
)
7195 /* Use V2SImode and V4SImode as representatives of all 64-bit
7196 and 128-bit vector types. */
7197 size
= int_size_in_bytes (type
);
7210 if (*modep
== VOIDmode
)
7213 /* Vector modes are considered to be opaque: two vectors are
7214 equivalent for the purposes of being homogeneous aggregates
7215 if they are the same size. */
7224 tree index
= TYPE_DOMAIN (type
);
7226 /* Can't handle incomplete types nor sizes that are not
7228 if (!COMPLETE_TYPE_P (type
)
7229 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
7232 count
= aapcs_vfp_sub_candidate (TREE_TYPE (type
), modep
);
7235 || !TYPE_MAX_VALUE (index
)
7236 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
7237 || !TYPE_MIN_VALUE (index
)
7238 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
7242 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
7243 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
7245 /* There must be no padding. */
7246 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
7258 /* Can't handle incomplete types nor sizes that are not
7260 if (!COMPLETE_TYPE_P (type
)
7261 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
7264 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
7266 if (TREE_CODE (field
) != FIELD_DECL
)
7269 sub_count
= aapcs_vfp_sub_candidate (TREE_TYPE (field
), modep
);
7275 /* There must be no padding. */
7276 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
7283 case QUAL_UNION_TYPE
:
7285 /* These aren't very interesting except in a degenerate case. */
7290 /* Can't handle incomplete types nor sizes that are not
7292 if (!COMPLETE_TYPE_P (type
)
7293 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
7296 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
7298 if (TREE_CODE (field
) != FIELD_DECL
)
7301 sub_count
= aapcs_vfp_sub_candidate (TREE_TYPE (field
), modep
);
7304 count
= count
> sub_count
? count
: sub_count
;
7307 /* There must be no padding. */
7308 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
/* Return true if we use LRA instead of the reload pass.  */
static bool
aarch64_lra_p (void)
{
  return aarch64_lra_flag;
}

/* Return TRUE if the type, as described by TYPE and MODE, is a composite
   type as described in AAPCS64 \S 4.3.  This includes aggregate, union and
   array types.  The C99 floating-point complex types are also considered
   as composite types, according to AAPCS64 \S 7.1.1.  The complex integer
   types, which are GCC extensions and out of the scope of AAPCS64, are
   treated as composite types here as well.

   Note that MODE itself is not sufficient in determining whether a type
   is such a composite type or not.  This is because
   stor-layout.c:compute_record_mode may have already changed the MODE
   (BLKmode) of a RECORD_TYPE TYPE to some other mode.  For example, a
   structure with only one field may have its MODE set to the mode of the
   field.  Also an integer mode whose size matches the size of the
   RECORD_TYPE type may be used to substitute the original mode
   (i.e. BLKmode) in certain circumstances.  In other words, MODE cannot be
   solely relied on.  */

static bool
aarch64_composite_type_p (const_tree type,
                          enum machine_mode mode)
{
  if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
    return true;

  if (mode == BLKmode
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return true;

  return false;
}

/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
   type as described in AAPCS64 \S 4.1.2.

   See the comment above aarch64_composite_type_p for the notes on MODE.  */

static bool
aarch64_short_vector_p (const_tree type,
                        enum machine_mode mode)
{
  HOST_WIDE_INT size = -1;

  if (type && TREE_CODE (type) == VECTOR_TYPE)
    size = int_size_in_bytes (type);
  else if (!aarch64_composite_type_p (type, mode)
           && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
               || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
    size = GET_MODE_SIZE (mode);

  return (size == 8 || size == 16) ? true : false;
}
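
/* For instance, an int32x2_t value (V2SImode, 8 bytes) and a float32x4_t
   value (V4SFmode, 16 bytes) both qualify as short vectors here, whereas a
   GNU vector declared with vector_size (32) does not: only 8- and 16-byte
   vectors match the AAPCS64 definition.  */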
/* Return TRUE if an argument, whose type is described by TYPE and MODE,
   shall be passed or returned in simd/fp register(s) (providing these
   parameter passing registers are available).

   Upon successful return, *COUNT returns the number of needed registers,
   *BASE_MODE returns the mode of the individual register and when IS_HA
   is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
   floating-point aggregate or a homogeneous short-vector aggregate.  */

static bool
aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
                                         const_tree type,
                                         enum machine_mode *base_mode,
                                         int *count,
                                         bool *is_ha)
{
  enum machine_mode new_mode = VOIDmode;
  bool composite_p = aarch64_composite_type_p (type, mode);

  if (is_ha != NULL) *is_ha = false;

  if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
      || aarch64_short_vector_p (type, mode))
    {
      *count = 1;
      new_mode = mode;
    }
  else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      if (is_ha != NULL) *is_ha = true;
      *count = 2;
      new_mode = GET_MODE_INNER (mode);
    }
  else if (type && composite_p)
    {
      int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);

      if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
        {
          if (is_ha != NULL) *is_ha = true;
          *count = ag_count;
        }
      else
        return false;
    }
  else
    return false;

  *base_mode = new_mode;
  return true;
}
/* Implement TARGET_STRUCT_VALUE_RTX.  */

static rtx
aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
                          int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
}

/* Implements target hook vector_mode_supported_p.  */
static bool
aarch64_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_SIMD
      && (mode == V4SImode  || mode == V8HImode
          || mode == V16QImode || mode == V2DImode
          || mode == V2SImode  || mode == V4HImode
          || mode == V8QImode || mode == V2SFmode
          || mode == V4SFmode || mode == V2DFmode
          || mode == V1DFmode))
    return true;

  return false;
}
/* Return appropriate SIMD container
   for MODE within a vector of WIDTH bits.  */
static enum machine_mode
aarch64_simd_container_mode (enum machine_mode mode, unsigned width)
{
  gcc_assert (width == 64 || width == 128);
/* Return 128-bit container as the preferred SIMD mode for MODE.  */
static enum machine_mode
aarch64_preferred_simd_mode (enum machine_mode mode)
{
  return aarch64_simd_container_mode (mode, 128);
}

/* Return the bitmask of possible vector sizes for the vectorizer
   to iterate over.  */
static unsigned int
aarch64_autovectorize_vector_sizes (void)
{
  return (16 | 8);
}
/* A table to help perform AArch64-specific name mangling for AdvSIMD
   vector types in order to conform to the AAPCS64 (see "Procedure
   Call Standard for the ARM 64-bit Architecture", Appendix A).  To
   qualify for emission with the mangled names defined in that document,
   a vector type must not only be of the correct mode but also be
   composed of AdvSIMD vector element types (e.g.
   __builtin_aarch64_simd_qi); these types are registered by
   aarch64_init_simd_builtins ().  In other words, vector types defined
   in other ways e.g. via vector_size attribute will get default
   mangled names.  */
typedef struct
{
  enum machine_mode mode;
  const char *element_type_name;
  const char *mangled_name;
} aarch64_simd_mangle_map_entry;
static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
  /* 64-bit containerized types.  */
  { V8QImode,  "__builtin_aarch64_simd_qi",     "10__Int8x8_t" },
  { V8QImode,  "__builtin_aarch64_simd_uqi",    "11__Uint8x8_t" },
  { V4HImode,  "__builtin_aarch64_simd_hi",     "11__Int16x4_t" },
  { V4HImode,  "__builtin_aarch64_simd_uhi",    "12__Uint16x4_t" },
  { V2SImode,  "__builtin_aarch64_simd_si",     "11__Int32x2_t" },
  { V2SImode,  "__builtin_aarch64_simd_usi",    "12__Uint32x2_t" },
  { V2SFmode,  "__builtin_aarch64_simd_sf",     "13__Float32x2_t" },
  { DImode,    "__builtin_aarch64_simd_di",     "11__Int64x1_t" },
  { DImode,    "__builtin_aarch64_simd_udi",    "12__Uint64x1_t" },
  { V1DFmode,  "__builtin_aarch64_simd_df",     "13__Float64x1_t" },
  { V8QImode,  "__builtin_aarch64_simd_poly8",  "11__Poly8x8_t" },
  { V4HImode,  "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
  /* 128-bit containerized types.  */
  { V16QImode, "__builtin_aarch64_simd_qi",     "11__Int8x16_t" },
  { V16QImode, "__builtin_aarch64_simd_uqi",    "12__Uint8x16_t" },
  { V8HImode,  "__builtin_aarch64_simd_hi",     "11__Int16x8_t" },
  { V8HImode,  "__builtin_aarch64_simd_uhi",    "12__Uint16x8_t" },
  { V4SImode,  "__builtin_aarch64_simd_si",     "11__Int32x4_t" },
  { V4SImode,  "__builtin_aarch64_simd_usi",    "12__Uint32x4_t" },
  { V2DImode,  "__builtin_aarch64_simd_di",     "11__Int64x2_t" },
  { V2DImode,  "__builtin_aarch64_simd_udi",    "12__Uint64x2_t" },
  { V4SFmode,  "__builtin_aarch64_simd_sf",     "13__Float32x4_t" },
  { V2DFmode,  "__builtin_aarch64_simd_df",     "13__Float64x2_t" },
  { V16QImode, "__builtin_aarch64_simd_poly8",  "12__Poly8x16_t" },
  { V8HImode,  "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
  { V2DImode,  "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
  { VOIDmode, NULL, NULL }
};
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
aarch64_mangle_type (const_tree type)
{
  /* The AArch64 ABI documents say that "__va_list" has to be
     mangled as if it is in the "std" namespace.  */
  if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
    return "St9__va_list";

  /* Check the mode of the vector type, and the name of the vector
     element type, against the table.  */
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;

      while (pos->mode != VOIDmode)
        {
          tree elt_type = TREE_TYPE (type);

          if (pos->mode == TYPE_MODE (type)
              && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
              && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
                          pos->element_type_name))
            return pos->mangled_name;

          pos++;
        }
    }

  /* Use the default mangling.  */
  return NULL;
}
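
/* For example, given the table above, a C++ declaration such as
   "void f (int8x8_t);" has its parameter type mangled through the
   "10__Int8x8_t" entry, so the function's symbol becomes
   _Z1f10__Int8x8_t (assuming the usual Itanium C++ mangling for the
   rest of the signature).  A vector type built with the generic
   vector_size attribute matches no element_type_name and falls
   through to the default mangling instead.  */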
/* Return the equivalent letter for size.  */
static char
sizetochar (int size)
{
  switch (size)
    {
    case 64: return 'd';
    case 32: return 's';
    case 16: return 'h';
    case 8 : return 'b';
    default: gcc_unreachable ();
    }
}
/* Return true iff X is a uniform vector of floating-point
   constants, and the constant can be represented in
   quarter-precision form.  Note, as aarch64_float_const_representable
   rejects both +0.0 and -0.0, we will also reject +0.0 and -0.0.  */
static bool
aarch64_vect_float_const_representable_p (rtx x)
{
  int i;
  REAL_VALUE_TYPE r0, ri;
  rtx x0, xi;

  if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
    return false;

  x0 = CONST_VECTOR_ELT (x, 0);
  if (!CONST_DOUBLE_P (x0))
    return false;

  REAL_VALUE_FROM_CONST_DOUBLE (r0, x0);

  for (i = 1; i < CONST_VECTOR_NUNITS (x); i++)
    {
      xi = CONST_VECTOR_ELT (x, i);
      if (!CONST_DOUBLE_P (xi))
        return false;

      REAL_VALUE_FROM_CONST_DOUBLE (ri, xi);
      if (!REAL_VALUES_EQUAL (r0, ri))
        return false;
    }

  return aarch64_float_const_representable_p (x0);
}
7647 /* Return true for valid and false for invalid. */
7649 aarch64_simd_valid_immediate (rtx op
, enum machine_mode mode
, bool inverse
,
7650 struct simd_immediate_info
*info
)
7652 #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
7654 for (i = 0; i < idx; i += (STRIDE)) \
7659 immtype = (CLASS); \
7660 elsize = (ELSIZE); \
7666 unsigned int i
, elsize
= 0, idx
= 0, n_elts
= CONST_VECTOR_NUNITS (op
);
7667 unsigned int innersize
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
7668 unsigned char bytes
[16];
7669 int immtype
= -1, matches
;
7670 unsigned int invmask
= inverse
? 0xff : 0;
7673 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
)
7675 if (! (aarch64_simd_imm_zero_p (op
, mode
)
7676 || aarch64_vect_float_const_representable_p (op
)))
7681 info
->value
= CONST_VECTOR_ELT (op
, 0);
7682 info
->element_width
= GET_MODE_BITSIZE (GET_MODE (info
->value
));
7690 /* Splat vector constant out into a byte vector. */
7691 for (i
= 0; i
< n_elts
; i
++)
7693 /* The vector is provided in gcc endian-neutral fashion. For aarch64_be,
7694 it must be laid out in the vector register in reverse order. */
7695 rtx el
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? (n_elts
- 1 - i
) : i
);
7696 unsigned HOST_WIDE_INT elpart
;
7697 unsigned int part
, parts
;
7699 if (CONST_INT_P (el
))
7701 elpart
= INTVAL (el
);
7704 else if (GET_CODE (el
) == CONST_DOUBLE
)
7706 elpart
= CONST_DOUBLE_LOW (el
);
7712 for (part
= 0; part
< parts
; part
++)
7715 for (byte
= 0; byte
< innersize
; byte
++)
7717 bytes
[idx
++] = (elpart
& 0xff) ^ invmask
;
7718 elpart
>>= BITS_PER_UNIT
;
7720 if (GET_CODE (el
) == CONST_DOUBLE
)
7721 elpart
= CONST_DOUBLE_HIGH (el
);
7726 gcc_assert (idx
== GET_MODE_SIZE (mode
));
7730 CHECK (4, 32, 0, bytes
[i
] == bytes
[0] && bytes
[i
+ 1] == 0
7731 && bytes
[i
+ 2] == 0 && bytes
[i
+ 3] == 0, 0, 0);
7733 CHECK (4, 32, 1, bytes
[i
] == 0 && bytes
[i
+ 1] == bytes
[1]
7734 && bytes
[i
+ 2] == 0 && bytes
[i
+ 3] == 0, 8, 0);
7736 CHECK (4, 32, 2, bytes
[i
] == 0 && bytes
[i
+ 1] == 0
7737 && bytes
[i
+ 2] == bytes
[2] && bytes
[i
+ 3] == 0, 16, 0);
7739 CHECK (4, 32, 3, bytes
[i
] == 0 && bytes
[i
+ 1] == 0
7740 && bytes
[i
+ 2] == 0 && bytes
[i
+ 3] == bytes
[3], 24, 0);
7742 CHECK (2, 16, 4, bytes
[i
] == bytes
[0] && bytes
[i
+ 1] == 0, 0, 0);
7744 CHECK (2, 16, 5, bytes
[i
] == 0 && bytes
[i
+ 1] == bytes
[1], 8, 0);
7746 CHECK (4, 32, 6, bytes
[i
] == bytes
[0] && bytes
[i
+ 1] == 0xff
7747 && bytes
[i
+ 2] == 0xff && bytes
[i
+ 3] == 0xff, 0, 1);
7749 CHECK (4, 32, 7, bytes
[i
] == 0xff && bytes
[i
+ 1] == bytes
[1]
7750 && bytes
[i
+ 2] == 0xff && bytes
[i
+ 3] == 0xff, 8, 1);
7752 CHECK (4, 32, 8, bytes
[i
] == 0xff && bytes
[i
+ 1] == 0xff
7753 && bytes
[i
+ 2] == bytes
[2] && bytes
[i
+ 3] == 0xff, 16, 1);
7755 CHECK (4, 32, 9, bytes
[i
] == 0xff && bytes
[i
+ 1] == 0xff
7756 && bytes
[i
+ 2] == 0xff && bytes
[i
+ 3] == bytes
[3], 24, 1);
7758 CHECK (2, 16, 10, bytes
[i
] == bytes
[0] && bytes
[i
+ 1] == 0xff, 0, 1);
7760 CHECK (2, 16, 11, bytes
[i
] == 0xff && bytes
[i
+ 1] == bytes
[1], 8, 1);
7762 CHECK (4, 32, 12, bytes
[i
] == 0xff && bytes
[i
+ 1] == bytes
[1]
7763 && bytes
[i
+ 2] == 0 && bytes
[i
+ 3] == 0, 8, 0);
7765 CHECK (4, 32, 13, bytes
[i
] == 0 && bytes
[i
+ 1] == bytes
[1]
7766 && bytes
[i
+ 2] == 0xff && bytes
[i
+ 3] == 0xff, 8, 1);
7768 CHECK (4, 32, 14, bytes
[i
] == 0xff && bytes
[i
+ 1] == 0xff
7769 && bytes
[i
+ 2] == bytes
[2] && bytes
[i
+ 3] == 0, 16, 0);
7771 CHECK (4, 32, 15, bytes
[i
] == 0 && bytes
[i
+ 1] == 0
7772 && bytes
[i
+ 2] == bytes
[2] && bytes
[i
+ 3] == 0xff, 16, 1);
7774 CHECK (1, 8, 16, bytes
[i
] == bytes
[0], 0, 0);
7776 CHECK (1, 64, 17, (bytes
[i
] == 0 || bytes
[i
] == 0xff)
7777 && bytes
[i
] == bytes
[(i
+ 8) % idx
], 0, 0);
7786 info
->element_width
= elsize
;
7787 info
->mvn
= emvn
!= 0;
7788 info
->shift
= eshift
;
7790 unsigned HOST_WIDE_INT imm
= 0;
7792 if (immtype
>= 12 && immtype
<= 15)
7795 /* Un-invert bytes of recognized vector, if necessary. */
7797 for (i
= 0; i
< idx
; i
++)
7798 bytes
[i
] ^= invmask
;
7802 /* FIXME: Broken on 32-bit H_W_I hosts. */
7803 gcc_assert (sizeof (HOST_WIDE_INT
) == 8);
7805 for (i
= 0; i
< 8; i
++)
7806 imm
|= (unsigned HOST_WIDE_INT
) (bytes
[i
] ? 0xff : 0)
7807 << (i
* BITS_PER_UNIT
);
7810 info
->value
= GEN_INT (imm
);
7814 for (i
= 0; i
< elsize
/ BITS_PER_UNIT
; i
++)
7815 imm
|= (unsigned HOST_WIDE_INT
) bytes
[i
] << (i
* BITS_PER_UNIT
);
7817 /* Construct 'abcdefgh' because the assembler cannot handle
7818 generic constants. */
7821 imm
= (imm
>> info
->shift
) & 0xff;
7822 info
->value
= GEN_INT (imm
);
7831 aarch64_const_vec_all_same_int_p (rtx x
,
7832 HOST_WIDE_INT minval
,
7833 HOST_WIDE_INT maxval
)
7835 HOST_WIDE_INT firstval
;
7838 if (GET_CODE (x
) != CONST_VECTOR
7839 || GET_MODE_CLASS (GET_MODE (x
)) != MODE_VECTOR_INT
)
7842 firstval
= INTVAL (CONST_VECTOR_ELT (x
, 0));
7843 if (firstval
< minval
|| firstval
> maxval
)
7846 count
= CONST_VECTOR_NUNITS (x
);
7847 for (i
= 1; i
< count
; i
++)
7848 if (INTVAL (CONST_VECTOR_ELT (x
, i
)) != firstval
)
/* Check if immediate shift constants are within range.  */
7856 aarch64_simd_shift_imm_p (rtx x
, enum machine_mode mode
, bool left
)
7858 int bit_width
= GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
;
7860 return aarch64_const_vec_all_same_int_p (x
, 0, bit_width
- 1);
7862 return aarch64_const_vec_all_same_int_p (x
, 1, bit_width
);
7865 /* Return true if X is a uniform vector where all elements
7866 are either the floating-point constant 0.0 or the
7867 integer constant 0. */
7869 aarch64_simd_imm_zero_p (rtx x
, enum machine_mode mode
)
7871 return x
== CONST0_RTX (mode
);
7875 aarch64_simd_imm_scalar_p (rtx x
, enum machine_mode mode ATTRIBUTE_UNUSED
)
7877 HOST_WIDE_INT imm
= INTVAL (x
);
7880 for (i
= 0; i
< 8; i
++)
7882 unsigned int byte
= imm
& 0xff;
7883 if (byte
!= 0xff && byte
!= 0)
7892 aarch64_mov_operand_p (rtx x
,
7893 enum aarch64_symbol_context context
,
7894 enum machine_mode mode
)
7896 if (GET_CODE (x
) == HIGH
7897 && aarch64_valid_symref (XEXP (x
, 0), GET_MODE (XEXP (x
, 0))))
7900 if (CONST_INT_P (x
) && aarch64_move_imm (INTVAL (x
), mode
))
7903 if (GET_CODE (x
) == SYMBOL_REF
&& mode
== DImode
&& CONSTANT_ADDRESS_P (x
))
7906 return aarch64_classify_symbolic_expression (x
, context
)
7907 == SYMBOL_TINY_ABSOLUTE
;
7910 /* Return a const_int vector of VAL. */
7912 aarch64_simd_gen_const_vector_dup (enum machine_mode mode
, int val
)
7914 int nunits
= GET_MODE_NUNITS (mode
);
7915 rtvec v
= rtvec_alloc (nunits
);
7918 for (i
=0; i
< nunits
; i
++)
7919 RTVEC_ELT (v
, i
) = GEN_INT (val
);
7921 return gen_rtx_CONST_VECTOR (mode
, v
);
7924 /* Check OP is a legal scalar immediate for the MOVI instruction. */
7927 aarch64_simd_scalar_immediate_valid_for_move (rtx op
, enum machine_mode mode
)
7929 enum machine_mode vmode
;
7931 gcc_assert (!VECTOR_MODE_P (mode
));
7932 vmode
= aarch64_preferred_simd_mode (mode
);
7933 rtx op_v
= aarch64_simd_gen_const_vector_dup (vmode
, INTVAL (op
));
7934 return aarch64_simd_valid_immediate (op_v
, vmode
, false, NULL
);
/* Construct and return a PARALLEL RTX vector with elements numbering the
   lanes of either the high (HIGH == TRUE) or low (HIGH == FALSE) half of
   the vector - from the perspective of the architecture.  This does not
   line up with GCC's perspective on lane numbers, so we end up with
   different masks depending on our target endian-ness.  The diagram
   below may help.  We must draw the distinction when building masks
   which select one half of the vector.  An instruction selecting
   architectural low-lanes for a big-endian target, must be described using
   a mask selecting GCC high-lanes.

                    Big-Endian              Little-Endian

   GCC               0   1   2   3           3   2   1   0
                   | x | x | x | x |       | x | x | x | x |
   Architecture      3   2   1   0           3   2   1   0

   Low Mask:         { 2, 3 }                { 0, 1 }
   High Mask:        { 0, 1 }                { 2, 3 }
*/

rtx
aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
{
  int nunits = GET_MODE_NUNITS (mode);
  rtvec v = rtvec_alloc (nunits / 2);
  int high_base = nunits / 2;
  int low_base = 0;
  int base;
  rtx t1;
  int i;

  if (BYTES_BIG_ENDIAN)
    base = high ? low_base : high_base;
  else
    base = high ? high_base : low_base;

  for (i = 0; i < nunits / 2; i++)
    RTVEC_ELT (v, i) = GEN_INT (base + i);

  t1 = gen_rtx_PARALLEL (mode, v);
  return t1;
}
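
/* As a worked example of the function above: for V4SImode with HIGH == true,
   a little-endian target gets a PARALLEL of the constants { 2, 3 } (the
   architectural high lanes), while a big-endian target gets { 0, 1 } for the
   same request, exactly as in the Low/High mask diagram above.  */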
7980 /* Check OP for validity as a PARALLEL RTX vector with elements
7981 numbering the lanes of either the high (HIGH == TRUE) or low lanes,
7982 from the perspective of the architecture. See the diagram above
7983 aarch64_simd_vect_par_cnst_half for more details. */
7986 aarch64_simd_check_vect_par_cnst_half (rtx op
, enum machine_mode mode
,
7989 rtx ideal
= aarch64_simd_vect_par_cnst_half (mode
, high
);
7990 HOST_WIDE_INT count_op
= XVECLEN (op
, 0);
7991 HOST_WIDE_INT count_ideal
= XVECLEN (ideal
, 0);
7994 if (!VECTOR_MODE_P (mode
))
7997 if (count_op
!= count_ideal
)
8000 for (i
= 0; i
< count_ideal
; i
++)
8002 rtx elt_op
= XVECEXP (op
, 0, i
);
8003 rtx elt_ideal
= XVECEXP (ideal
, 0, i
);
8005 if (!CONST_INT_P (elt_op
)
8006 || INTVAL (elt_ideal
) != INTVAL (elt_op
))
8012 /* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
8013 HIGH (exclusive). */
8015 aarch64_simd_lane_bounds (rtx operand
, HOST_WIDE_INT low
, HOST_WIDE_INT high
)
8018 gcc_assert (CONST_INT_P (operand
));
8019 lane
= INTVAL (operand
);
8021 if (lane
< low
|| lane
>= high
)
8022 error ("lane out of range");
8026 aarch64_simd_const_bounds (rtx operand
, HOST_WIDE_INT low
, HOST_WIDE_INT high
)
8028 gcc_assert (CONST_INT_P (operand
));
8029 HOST_WIDE_INT lane
= INTVAL (operand
);
8031 if (lane
< low
|| lane
>= high
)
8032 error ("constant out of range");
8035 /* Emit code to reinterpret one AdvSIMD type as another,
8036 without altering bits. */
8038 aarch64_simd_reinterpret (rtx dest
, rtx src
)
8040 emit_move_insn (dest
, gen_lowpart (GET_MODE (dest
), src
));
8043 /* Emit code to place a AdvSIMD pair result in memory locations (with equal
8046 aarch64_simd_emit_pair_result_insn (enum machine_mode mode
,
8047 rtx (*intfn
) (rtx
, rtx
, rtx
), rtx destaddr
,
8050 rtx mem
= gen_rtx_MEM (mode
, destaddr
);
8051 rtx tmp1
= gen_reg_rtx (mode
);
8052 rtx tmp2
= gen_reg_rtx (mode
);
8054 emit_insn (intfn (tmp1
, op1
, tmp2
));
8056 emit_move_insn (mem
, tmp1
);
8057 mem
= adjust_address (mem
, mode
, GET_MODE_SIZE (mode
));
8058 emit_move_insn (mem
, tmp2
);
8061 /* Return TRUE if OP is a valid vector addressing mode. */
8063 aarch64_simd_mem_operand_p (rtx op
)
8065 return MEM_P (op
) && (GET_CODE (XEXP (op
, 0)) == POST_INC
8066 || REG_P (XEXP (op
, 0)));
8069 /* Set up OPERANDS for a register copy from SRC to DEST, taking care
8070 not to early-clobber SRC registers in the process.
8072 We assume that the operands described by SRC and DEST represent a
8073 decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
8074 number of components into which the copy has been decomposed. */
8076 aarch64_simd_disambiguate_copy (rtx
*operands
, rtx
*dest
,
8077 rtx
*src
, unsigned int count
)
8081 if (!reg_overlap_mentioned_p (operands
[0], operands
[1])
8082 || REGNO (operands
[0]) < REGNO (operands
[1]))
8084 for (i
= 0; i
< count
; i
++)
8086 operands
[2 * i
] = dest
[i
];
8087 operands
[2 * i
+ 1] = src
[i
];
8092 for (i
= 0; i
< count
; i
++)
8094 operands
[2 * i
] = dest
[count
- i
- 1];
8095 operands
[2 * i
+ 1] = src
[count
- i
- 1];
8100 /* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
8101 one of VSTRUCT modes: OI, CI or XI. */
8103 aarch64_simd_attr_length_move (rtx insn
)
8105 enum machine_mode mode
;
8107 extract_insn_cached (insn
);
8109 if (REG_P (recog_data
.operand
[0]) && REG_P (recog_data
.operand
[1]))
8111 mode
= GET_MODE (recog_data
.operand
[0]);
8127 /* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
8128 alignment of a vector to 128 bits. */
8129 static HOST_WIDE_INT
8130 aarch64_simd_vector_alignment (const_tree type
)
8132 HOST_WIDE_INT align
= tree_to_shwi (TYPE_SIZE (type
));
8133 return MIN (align
, 128);
8136 /* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
8138 aarch64_simd_vector_alignment_reachable (const_tree type
, bool is_packed
)
8143 /* We guarantee alignment for vectors up to 128-bits. */
8144 if (tree_int_cst_compare (TYPE_SIZE (type
),
8145 bitsize_int (BIGGEST_ALIGNMENT
)) > 0)
8148 /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
8152 /* If VALS is a vector constant that can be loaded into a register
8153 using DUP, generate instructions to do so and return an RTX to
8154 assign to the register. Otherwise return NULL_RTX. */
8156 aarch64_simd_dup_constant (rtx vals
)
8158 enum machine_mode mode
= GET_MODE (vals
);
8159 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
8160 int n_elts
= GET_MODE_NUNITS (mode
);
8161 bool all_same
= true;
8165 if (GET_CODE (vals
) != CONST_VECTOR
)
8168 for (i
= 1; i
< n_elts
; ++i
)
8170 x
= CONST_VECTOR_ELT (vals
, i
);
8171 if (!rtx_equal_p (x
, CONST_VECTOR_ELT (vals
, 0)))
8178 /* We can load this constant by using DUP and a constant in a
8179 single ARM register. This will be cheaper than a vector
8181 x
= copy_to_mode_reg (inner_mode
, CONST_VECTOR_ELT (vals
, 0));
8182 return gen_rtx_VEC_DUPLICATE (mode
, x
);
8186 /* Generate code to load VALS, which is a PARALLEL containing only
8187 constants (for vec_init) or CONST_VECTOR, efficiently into a
8188 register. Returns an RTX to copy into the register, or NULL_RTX
8189 for a PARALLEL that can not be converted into a CONST_VECTOR. */
8191 aarch64_simd_make_constant (rtx vals
)
8193 enum machine_mode mode
= GET_MODE (vals
);
8195 rtx const_vec
= NULL_RTX
;
8196 int n_elts
= GET_MODE_NUNITS (mode
);
8200 if (GET_CODE (vals
) == CONST_VECTOR
)
8202 else if (GET_CODE (vals
) == PARALLEL
)
8204 /* A CONST_VECTOR must contain only CONST_INTs and
8205 CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
8206 Only store valid constants in a CONST_VECTOR. */
8207 for (i
= 0; i
< n_elts
; ++i
)
8209 rtx x
= XVECEXP (vals
, 0, i
);
8210 if (CONST_INT_P (x
) || CONST_DOUBLE_P (x
))
8213 if (n_const
== n_elts
)
8214 const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
8219 if (const_vec
!= NULL_RTX
8220 && aarch64_simd_valid_immediate (const_vec
, mode
, false, NULL
))
8221 /* Load using MOVI/MVNI. */
8223 else if ((const_dup
= aarch64_simd_dup_constant (vals
)) != NULL_RTX
)
8224 /* Loaded using DUP. */
8226 else if (const_vec
!= NULL_RTX
)
8227 /* Load from constant pool. We can not take advantage of single-cycle
8228 LD1 because we need a PC-relative addressing mode. */
8231 /* A PARALLEL containing something not valid inside CONST_VECTOR.
8232 We can not construct an initializer. */
8237 aarch64_expand_vector_init (rtx target
, rtx vals
)
8239 enum machine_mode mode
= GET_MODE (target
);
8240 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
8241 int n_elts
= GET_MODE_NUNITS (mode
);
8242 int n_var
= 0, one_var
= -1;
8243 bool all_same
= true;
8247 x
= XVECEXP (vals
, 0, 0);
8248 if (!CONST_INT_P (x
) && !CONST_DOUBLE_P (x
))
8249 n_var
= 1, one_var
= 0;
8251 for (i
= 1; i
< n_elts
; ++i
)
8253 x
= XVECEXP (vals
, 0, i
);
8254 if (!CONST_INT_P (x
) && !CONST_DOUBLE_P (x
))
8255 ++n_var
, one_var
= i
;
8257 if (!rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
8263 rtx constant
= aarch64_simd_make_constant (vals
);
8264 if (constant
!= NULL_RTX
)
8266 emit_move_insn (target
, constant
);
8271 /* Splat a single non-constant element if we can. */
8274 x
= copy_to_mode_reg (inner_mode
, XVECEXP (vals
, 0, 0));
8275 aarch64_emit_move (target
, gen_rtx_VEC_DUPLICATE (mode
, x
));
8279 /* One field is non-constant. Load constant then overwrite varying
8280 field. This is more efficient than using the stack. */
8283 rtx copy
= copy_rtx (vals
);
8284 rtx index
= GEN_INT (one_var
);
8285 enum insn_code icode
;
8287 /* Load constant part of vector, substitute neighboring value for
8289 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, one_var
^ 1);
8290 aarch64_expand_vector_init (target
, copy
);
8292 /* Insert variable. */
8293 x
= copy_to_mode_reg (inner_mode
, XVECEXP (vals
, 0, one_var
));
8294 icode
= optab_handler (vec_set_optab
, mode
);
8295 gcc_assert (icode
!= CODE_FOR_nothing
);
8296 emit_insn (GEN_FCN (icode
) (target
, x
, index
));
8300 /* Construct the vector in memory one field at a time
8301 and load the whole vector. */
8302 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
8303 for (i
= 0; i
< n_elts
; i
++)
8304 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
8305 i
* GET_MODE_SIZE (inner_mode
)),
8306 XVECEXP (vals
, 0, i
));
8307 emit_move_insn (target
, mem
);
8311 static unsigned HOST_WIDE_INT
8312 aarch64_shift_truncation_mask (enum machine_mode mode
)
8315 (aarch64_vector_mode_supported_p (mode
)
8316 || aarch64_vect_struct_mode_p (mode
)) ? 0 : (GET_MODE_BITSIZE (mode
) - 1);
8319 #ifndef TLS_SECTION_ASM_FLAG
8320 #define TLS_SECTION_ASM_FLAG 'T'
8324 aarch64_elf_asm_named_section (const char *name
, unsigned int flags
,
8325 tree decl ATTRIBUTE_UNUSED
)
8327 char flagchars
[10], *f
= flagchars
;
8329 /* If we have already declared this section, we can use an
8330 abbreviated form to switch back to it -- unless this section is
8331 part of a COMDAT groups, in which case GAS requires the full
8332 declaration every time. */
8333 if (!(HAVE_COMDAT_GROUP
&& (flags
& SECTION_LINKONCE
))
8334 && (flags
& SECTION_DECLARED
))
8336 fprintf (asm_out_file
, "\t.section\t%s\n", name
);
8340 if (!(flags
& SECTION_DEBUG
))
8342 if (flags
& SECTION_WRITE
)
8344 if (flags
& SECTION_CODE
)
8346 if (flags
& SECTION_SMALL
)
8348 if (flags
& SECTION_MERGE
)
8350 if (flags
& SECTION_STRINGS
)
8352 if (flags
& SECTION_TLS
)
8353 *f
++ = TLS_SECTION_ASM_FLAG
;
8354 if (HAVE_COMDAT_GROUP
&& (flags
& SECTION_LINKONCE
))
8358 fprintf (asm_out_file
, "\t.section\t%s,\"%s\"", name
, flagchars
);
8360 if (!(flags
& SECTION_NOTYPE
))
8365 if (flags
& SECTION_BSS
)
8370 #ifdef TYPE_OPERAND_FMT
8371 format
= "," TYPE_OPERAND_FMT
;
8376 fprintf (asm_out_file
, format
, type
);
8378 if (flags
& SECTION_ENTSIZE
)
8379 fprintf (asm_out_file
, ",%d", flags
& SECTION_ENTSIZE
);
8380 if (HAVE_COMDAT_GROUP
&& (flags
& SECTION_LINKONCE
))
8382 if (TREE_CODE (decl
) == IDENTIFIER_NODE
)
8383 fprintf (asm_out_file
, ",%s,comdat", IDENTIFIER_POINTER (decl
));
8385 fprintf (asm_out_file
, ",%s,comdat",
8386 IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl
)));
8390 putc ('\n', asm_out_file
);
8393 /* Select a format to encode pointers in exception handling data. */
8395 aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED
, int global
)
8398 switch (aarch64_cmodel
)
8400 case AARCH64_CMODEL_TINY
:
8401 case AARCH64_CMODEL_TINY_PIC
:
8402 case AARCH64_CMODEL_SMALL
:
8403 case AARCH64_CMODEL_SMALL_PIC
:
8404 /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
8406 type
= DW_EH_PE_sdata4
;
8409 /* No assumptions here. 8-byte relocs required. */
8410 type
= DW_EH_PE_sdata8
;
8413 return (global
? DW_EH_PE_indirect
: 0) | DW_EH_PE_pcrel
| type
;
8416 /* Emit load exclusive. */
8419 aarch64_emit_load_exclusive (enum machine_mode mode
, rtx rval
,
8420 rtx mem
, rtx model_rtx
)
8422 rtx (*gen
) (rtx
, rtx
, rtx
);
8426 case QImode
: gen
= gen_aarch64_load_exclusiveqi
; break;
8427 case HImode
: gen
= gen_aarch64_load_exclusivehi
; break;
8428 case SImode
: gen
= gen_aarch64_load_exclusivesi
; break;
8429 case DImode
: gen
= gen_aarch64_load_exclusivedi
; break;
8434 emit_insn (gen (rval
, mem
, model_rtx
));
8437 /* Emit store exclusive. */
8440 aarch64_emit_store_exclusive (enum machine_mode mode
, rtx bval
,
8441 rtx rval
, rtx mem
, rtx model_rtx
)
8443 rtx (*gen
) (rtx
, rtx
, rtx
, rtx
);
8447 case QImode
: gen
= gen_aarch64_store_exclusiveqi
; break;
8448 case HImode
: gen
= gen_aarch64_store_exclusivehi
; break;
8449 case SImode
: gen
= gen_aarch64_store_exclusivesi
; break;
8450 case DImode
: gen
= gen_aarch64_store_exclusivedi
; break;
8455 emit_insn (gen (bval
, rval
, mem
, model_rtx
));
8458 /* Mark the previous jump instruction as unlikely. */
8461 aarch64_emit_unlikely_jump (rtx insn
)
8463 int very_unlikely
= REG_BR_PROB_BASE
/ 100 - 1;
8465 insn
= emit_jump_insn (insn
);
8466 add_int_reg_note (insn
, REG_BR_PROB
, very_unlikely
);
8469 /* Expand a compare and swap pattern. */
8472 aarch64_expand_compare_and_swap (rtx operands
[])
8474 rtx bval
, rval
, mem
, oldval
, newval
, is_weak
, mod_s
, mod_f
, x
;
8475 enum machine_mode mode
, cmp_mode
;
8476 rtx (*gen
) (rtx
, rtx
, rtx
, rtx
, rtx
, rtx
, rtx
);
8481 oldval
= operands
[3];
8482 newval
= operands
[4];
8483 is_weak
= operands
[5];
8484 mod_s
= operands
[6];
8485 mod_f
= operands
[7];
8486 mode
= GET_MODE (mem
);
8489 /* Normally the succ memory model must be stronger than fail, but in the
8490 unlikely event of fail being ACQUIRE and succ being RELEASE we need to
8491 promote succ to ACQ_REL so that we don't lose the acquire semantics. */
8493 if (INTVAL (mod_f
) == MEMMODEL_ACQUIRE
8494 && INTVAL (mod_s
) == MEMMODEL_RELEASE
)
8495 mod_s
= GEN_INT (MEMMODEL_ACQ_REL
);
8501 /* For short modes, we're going to perform the comparison in SImode,
8502 so do the zero-extension now. */
8504 rval
= gen_reg_rtx (SImode
);
8505 oldval
= convert_modes (SImode
, mode
, oldval
, true);
8510 /* Force the value into a register if needed. */
8511 if (!aarch64_plus_operand (oldval
, mode
))
8512 oldval
= force_reg (cmp_mode
, oldval
);
8521 case QImode
: gen
= gen_atomic_compare_and_swapqi_1
; break;
8522 case HImode
: gen
= gen_atomic_compare_and_swaphi_1
; break;
8523 case SImode
: gen
= gen_atomic_compare_and_swapsi_1
; break;
8524 case DImode
: gen
= gen_atomic_compare_and_swapdi_1
; break;
8529 emit_insn (gen (rval
, mem
, oldval
, newval
, is_weak
, mod_s
, mod_f
));
8531 if (mode
== QImode
|| mode
== HImode
)
8532 emit_move_insn (operands
[1], gen_lowpart (mode
, rval
));
8534 x
= gen_rtx_REG (CCmode
, CC_REGNUM
);
8535 x
= gen_rtx_EQ (SImode
, x
, const0_rtx
);
8536 emit_insn (gen_rtx_SET (VOIDmode
, bval
, x
));
8539 /* Split a compare and swap pattern. */
8542 aarch64_split_compare_and_swap (rtx operands
[])
8544 rtx rval
, mem
, oldval
, newval
, scratch
;
8545 enum machine_mode mode
;
8547 rtx_code_label
*label1
, *label2
;
8552 oldval
= operands
[2];
8553 newval
= operands
[3];
8554 is_weak
= (operands
[4] != const0_rtx
);
8555 scratch
= operands
[7];
8556 mode
= GET_MODE (mem
);
8561 label1
= gen_label_rtx ();
8562 emit_label (label1
);
8564 label2
= gen_label_rtx ();
8566 aarch64_emit_load_exclusive (mode
, rval
, mem
, operands
[5]);
8568 cond
= aarch64_gen_compare_reg (NE
, rval
, oldval
);
8569 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
8570 x
= gen_rtx_IF_THEN_ELSE (VOIDmode
, x
,
8571 gen_rtx_LABEL_REF (Pmode
, label2
), pc_rtx
);
8572 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode
, pc_rtx
, x
));
8574 aarch64_emit_store_exclusive (mode
, scratch
, mem
, newval
, operands
[5]);
8578 x
= gen_rtx_NE (VOIDmode
, scratch
, const0_rtx
);
8579 x
= gen_rtx_IF_THEN_ELSE (VOIDmode
, x
,
8580 gen_rtx_LABEL_REF (Pmode
, label1
), pc_rtx
);
8581 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode
, pc_rtx
, x
));
8585 cond
= gen_rtx_REG (CCmode
, CC_REGNUM
);
8586 x
= gen_rtx_COMPARE (CCmode
, scratch
, const0_rtx
);
8587 emit_insn (gen_rtx_SET (VOIDmode
, cond
, x
));
8590 emit_label (label2
);
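
/* For reference, the split sequence built above for a strong compare-and-swap
   typically assembles to something of the following shape (register numbers
   are illustrative only):

     .Lretry:
        ldaxr   w0, [x1]        // load-exclusive RVAL from MEM
        cmp     w0, w2          // compare against OLDVAL
        bne     .Ldone          // mismatch: fail the CAS
        stlxr   w3, w4, [x1]    // store-exclusive NEWVAL, status in SCRATCH
        cbnz    w3, .Lretry     // store-exclusive failed: retry
     .Ldone:

   with plain, acquire or release forms of the exclusive instructions chosen
   according to the memory model operands.  */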
8593 /* Split an atomic operation. */
8596 aarch64_split_atomic_op (enum rtx_code code
, rtx old_out
, rtx new_out
, rtx mem
,
8597 rtx value
, rtx model_rtx
, rtx cond
)
8599 enum machine_mode mode
= GET_MODE (mem
);
8600 enum machine_mode wmode
= (mode
== DImode
? DImode
: SImode
);
8601 rtx_code_label
*label
;
8604 label
= gen_label_rtx ();
8608 new_out
= gen_lowpart (wmode
, new_out
);
8610 old_out
= gen_lowpart (wmode
, old_out
);
8613 value
= simplify_gen_subreg (wmode
, value
, mode
, 0);
8615 aarch64_emit_load_exclusive (mode
, old_out
, mem
, model_rtx
);
8624 x
= gen_rtx_AND (wmode
, old_out
, value
);
8625 emit_insn (gen_rtx_SET (VOIDmode
, new_out
, x
));
8626 x
= gen_rtx_NOT (wmode
, new_out
);
8627 emit_insn (gen_rtx_SET (VOIDmode
, new_out
, x
));
8631 if (CONST_INT_P (value
))
8633 value
= GEN_INT (-INTVAL (value
));
8639 x
= gen_rtx_fmt_ee (code
, wmode
, old_out
, value
);
8640 emit_insn (gen_rtx_SET (VOIDmode
, new_out
, x
));
8644 aarch64_emit_store_exclusive (mode
, cond
, mem
,
8645 gen_lowpart (mode
, new_out
), model_rtx
);
8647 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
8648 x
= gen_rtx_IF_THEN_ELSE (VOIDmode
, x
,
8649 gen_rtx_LABEL_REF (Pmode
, label
), pc_rtx
);
8650 aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode
, pc_rtx
, x
));
8654 aarch64_print_extension (void)
8656 const struct aarch64_option_extension
*opt
= NULL
;
8658 for (opt
= all_extensions
; opt
->name
!= NULL
; opt
++)
8659 if ((aarch64_isa_flags
& opt
->flags_on
) == opt
->flags_on
)
8660 asm_fprintf (asm_out_file
, "+%s", opt
->name
);
8662 asm_fprintf (asm_out_file
, "\n");
8666 aarch64_start_file (void)
8670 asm_fprintf (asm_out_file
, "\t.arch %s", selected_arch
->name
);
8671 aarch64_print_extension ();
8673 else if (selected_cpu
)
8675 const char *truncated_name
8676 = aarch64_rewrite_selected_cpu (selected_cpu
->name
);
8677 asm_fprintf (asm_out_file
, "\t.cpu %s", truncated_name
);
8678 aarch64_print_extension ();
8680 default_file_start();
8683 /* Target hook for c_mode_for_suffix. */
8684 static enum machine_mode
8685 aarch64_c_mode_for_suffix (char suffix
)
/* We can only represent floating point constants which will fit in
   "quarter-precision" values.  These values are characterised by
   a sign bit, a 4-bit mantissa and a 3-bit exponent.  And are given
   by:

   (-1)^s * (n/16) * 2^r

   where
   's' is the sign bit.
   'n' is an integer in the range 16 <= n <= 31.
   'r' is an integer in the range -3 <= r <= 4.  */
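
/* A minimal illustrative sketch (this helper is hypothetical and not used
   elsewhere in the port): decode an (s, n, r) triple into the value the
   formula above denotes.  For example s = 0, n = 16, r = -3 gives 0.125,
   the smallest positive representable value, while s = 0, n = 31, r = 4
   gives 31.0, the largest.  */
static double ATTRIBUTE_UNUSED
aarch64_quarter_precision_value (int s, int n, int r)
{
  /* (-1)^s * (n/16) * 2^r, with 16 <= n <= 31 and -3 <= r <= 4.  */
  double val = (double) n / 16.0;
  val = (r >= 0) ? val * (double) (1 << r) : val / (double) (1 << -r);
  return s ? -val : val;
}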
/* Return true iff X can be represented by a quarter-precision
   floating point immediate operand.  Note, we cannot represent 0.0.  */
8708 aarch64_float_const_representable_p (rtx x
)
8710 /* This represents our current view of how many bits
8711 make up the mantissa. */
8712 int point_pos
= 2 * HOST_BITS_PER_WIDE_INT
- 1;
8714 unsigned HOST_WIDE_INT mantissa
, mask
;
8715 REAL_VALUE_TYPE r
, m
;
8718 if (!CONST_DOUBLE_P (x
))
8721 if (GET_MODE (x
) == VOIDmode
)
8724 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
8726 /* We cannot represent infinities, NaNs or +/-zero. We won't
8727 know if we have +zero until we analyse the mantissa, but we
8728 can reject the other invalid values. */
8729 if (REAL_VALUE_ISINF (r
) || REAL_VALUE_ISNAN (r
)
8730 || REAL_VALUE_MINUS_ZERO (r
))
8733 /* Extract exponent. */
8734 r
= real_value_abs (&r
);
8735 exponent
= REAL_EXP (&r
);
8737 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
8738 highest (sign) bit, with a fixed binary point at bit point_pos.
8739 m1 holds the low part of the mantissa, m2 the high part.
8740 WARNING: If we ever have a representation using more than 2 * H_W_I - 1
8741 bits for the mantissa, this can fail (low bits will be lost). */
8742 real_ldexp (&m
, &r
, point_pos
- exponent
);
8743 wide_int w
= real_to_integer (&m
, &fail
, HOST_BITS_PER_WIDE_INT
* 2);
8745 /* If the low part of the mantissa has bits set we cannot represent
8749 /* We have rejected the lower HOST_WIDE_INT, so update our
8750 understanding of how many bits lie in the mantissa and
8751 look only at the high HOST_WIDE_INT. */
8752 mantissa
= w
.elt (1);
8753 point_pos
-= HOST_BITS_PER_WIDE_INT
;
8755 /* We can only represent values with a mantissa of the form 1.xxxx. */
8756 mask
= ((unsigned HOST_WIDE_INT
)1 << (point_pos
- 5)) - 1;
8757 if ((mantissa
& mask
) != 0)
8760 /* Having filtered unrepresentable values, we may now remove all
8761 but the highest 5 bits. */
8762 mantissa
>>= point_pos
- 5;
8764 /* We cannot represent the value 0.0, so reject it. This is handled
8769 /* Then, as bit 4 is always set, we can mask it off, leaving
8770 the mantissa in the range [0, 15]. */
8771 mantissa
&= ~(1 << 4);
8772 gcc_assert (mantissa
<= 15);
8774 /* GCC internally does not use IEEE754-like encoding (where normalized
8775 significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
8776 Our mantissa values are shifted 4 places to the left relative to
8777 normalized IEEE754 so we must modify the exponent returned by REAL_EXP
8778 by 5 places to correct for GCC's representation. */
8779 exponent
= 5 - exponent
;
8781 return (exponent
>= 0 && exponent
<= 7);
8785 aarch64_output_simd_mov_immediate (rtx const_vector
,
8786 enum machine_mode mode
,
8790 static char templ
[40];
8791 const char *mnemonic
;
8792 const char *shift_op
;
8793 unsigned int lane_count
= 0;
8796 struct simd_immediate_info info
= { NULL_RTX
, 0, 0, false, false };
8798 /* This will return true to show const_vector is legal for use as either
8799 a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate. It will
8800 also update INFO to show how the immediate should be generated. */
8801 is_valid
= aarch64_simd_valid_immediate (const_vector
, mode
, false, &info
);
8802 gcc_assert (is_valid
);
8804 element_char
= sizetochar (info
.element_width
);
8805 lane_count
= width
/ info
.element_width
;
8807 mode
= GET_MODE_INNER (mode
);
8808 if (mode
== SFmode
|| mode
== DFmode
)
8810 gcc_assert (info
.shift
== 0 && ! info
.mvn
);
8811 if (aarch64_float_const_zero_rtx_p (info
.value
))
8812 info
.value
= GEN_INT (0);
8817 REAL_VALUE_FROM_CONST_DOUBLE (r
, info
.value
);
8818 char float_buf
[buf_size
] = {'\0'};
8819 real_to_decimal_for_mode (float_buf
, &r
, buf_size
, buf_size
, 1, mode
);
8822 if (lane_count
== 1)
8823 snprintf (templ
, sizeof (templ
), "fmov\t%%d0, %s", float_buf
);
8825 snprintf (templ
, sizeof (templ
), "fmov\t%%0.%d%c, %s",
8826 lane_count
, element_char
, float_buf
);
8831 mnemonic
= info
.mvn
? "mvni" : "movi";
8832 shift_op
= info
.msl
? "msl" : "lsl";
8834 if (lane_count
== 1)
8835 snprintf (templ
, sizeof (templ
), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX
,
8836 mnemonic
, UINTVAL (info
.value
));
8837 else if (info
.shift
)
8838 snprintf (templ
, sizeof (templ
), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
8839 ", %s %d", mnemonic
, lane_count
, element_char
,
8840 UINTVAL (info
.value
), shift_op
, info
.shift
);
8842 snprintf (templ
, sizeof (templ
), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
,
8843 mnemonic
, lane_count
, element_char
, UINTVAL (info
.value
));
8848 aarch64_output_scalar_simd_mov_immediate (rtx immediate
,
8849 enum machine_mode mode
)
8851 enum machine_mode vmode
;
8853 gcc_assert (!VECTOR_MODE_P (mode
));
8854 vmode
= aarch64_simd_container_mode (mode
, 64);
8855 rtx v_op
= aarch64_simd_gen_const_vector_dup (vmode
, INTVAL (immediate
));
8856 return aarch64_output_simd_mov_immediate (v_op
, vmode
, 64);
8859 /* Split operands into moves from op[1] + op[2] into op[0]. */
8862 aarch64_split_combinev16qi (rtx operands
[3])
8864 unsigned int dest
= REGNO (operands
[0]);
8865 unsigned int src1
= REGNO (operands
[1]);
8866 unsigned int src2
= REGNO (operands
[2]);
8867 enum machine_mode halfmode
= GET_MODE (operands
[1]);
8868 unsigned int halfregs
= HARD_REGNO_NREGS (src1
, halfmode
);
8871 gcc_assert (halfmode
== V16QImode
);
8873 if (src1
== dest
&& src2
== dest
+ halfregs
)
8875 /* No-op move. Can't split to nothing; emit something. */
8876 emit_note (NOTE_INSN_DELETED
);
8880 /* Preserve register attributes for variable tracking. */
8881 destlo
= gen_rtx_REG_offset (operands
[0], halfmode
, dest
, 0);
8882 desthi
= gen_rtx_REG_offset (operands
[0], halfmode
, dest
+ halfregs
,
8883 GET_MODE_SIZE (halfmode
));
8885 /* Special case of reversed high/low parts. */
8886 if (reg_overlap_mentioned_p (operands
[2], destlo
)
8887 && reg_overlap_mentioned_p (operands
[1], desthi
))
8889 emit_insn (gen_xorv16qi3 (operands
[1], operands
[1], operands
[2]));
8890 emit_insn (gen_xorv16qi3 (operands
[2], operands
[1], operands
[2]));
8891 emit_insn (gen_xorv16qi3 (operands
[1], operands
[1], operands
[2]));
8893 else if (!reg_overlap_mentioned_p (operands
[2], destlo
))
8895 /* Try to avoid unnecessary moves if part of the result
8896 is in the right place already. */
8898 emit_move_insn (destlo
, operands
[1]);
8899 if (src2
!= dest
+ halfregs
)
8900 emit_move_insn (desthi
, operands
[2]);
8904 if (src2
!= dest
+ halfregs
)
8905 emit_move_insn (desthi
, operands
[2]);
8907 emit_move_insn (destlo
, operands
[1]);
8911 /* vec_perm support. */
8913 #define MAX_VECT_LEN 16
8915 struct expand_vec_perm_d
8917 rtx target
, op0
, op1
;
8918 unsigned char perm
[MAX_VECT_LEN
];
8919 enum machine_mode vmode
;
8925 /* Generate a variable permutation. */
8928 aarch64_expand_vec_perm_1 (rtx target
, rtx op0
, rtx op1
, rtx sel
)
8930 enum machine_mode vmode
= GET_MODE (target
);
8931 bool one_vector_p
= rtx_equal_p (op0
, op1
);
8933 gcc_checking_assert (vmode
== V8QImode
|| vmode
== V16QImode
);
8934 gcc_checking_assert (GET_MODE (op0
) == vmode
);
8935 gcc_checking_assert (GET_MODE (op1
) == vmode
);
8936 gcc_checking_assert (GET_MODE (sel
) == vmode
);
8937 gcc_checking_assert (TARGET_SIMD
);
8941 if (vmode
== V8QImode
)
8943 /* Expand the argument to a V16QI mode by duplicating it. */
8944 rtx pair
= gen_reg_rtx (V16QImode
);
8945 emit_insn (gen_aarch64_combinev8qi (pair
, op0
, op0
));
8946 emit_insn (gen_aarch64_tbl1v8qi (target
, pair
, sel
));
8950 emit_insn (gen_aarch64_tbl1v16qi (target
, op0
, sel
));
8957 if (vmode
== V8QImode
)
8959 pair
= gen_reg_rtx (V16QImode
);
8960 emit_insn (gen_aarch64_combinev8qi (pair
, op0
, op1
));
8961 emit_insn (gen_aarch64_tbl1v8qi (target
, pair
, sel
));
8965 pair
= gen_reg_rtx (OImode
);
8966 emit_insn (gen_aarch64_combinev16qi (pair
, op0
, op1
));
8967 emit_insn (gen_aarch64_tbl2v16qi (target
, pair
, sel
));
8973 aarch64_expand_vec_perm (rtx target
, rtx op0
, rtx op1
, rtx sel
)
8975 enum machine_mode vmode
= GET_MODE (target
);
8976 unsigned int nelt
= GET_MODE_NUNITS (vmode
);
8977 bool one_vector_p
= rtx_equal_p (op0
, op1
);
8980 /* The TBL instruction does not use a modulo index, so we must take care
8981 of that ourselves. */
8982 mask
= aarch64_simd_gen_const_vector_dup (vmode
,
8983 one_vector_p
? nelt
- 1 : 2 * nelt
- 1);
8984 sel
= expand_simple_binop (vmode
, AND
, sel
, mask
, NULL
, 0, OPTAB_LIB_WIDEN
);
8986 /* For big-endian, we also need to reverse the index within the vector
8987 (but not which vector). */
8988 if (BYTES_BIG_ENDIAN
)
8990 /* If one_vector_p, mask is a vector of (nelt - 1)'s already. */
8992 mask
= aarch64_simd_gen_const_vector_dup (vmode
, nelt
- 1);
8993 sel
= expand_simple_binop (vmode
, XOR
, sel
, mask
,
8994 NULL
, 0, OPTAB_LIB_WIDEN
);
8996 aarch64_expand_vec_perm_1 (target
, op0
, op1
, sel
);
8999 /* Recognize patterns suitable for the TRN instructions. */
9001 aarch64_evpc_trn (struct expand_vec_perm_d
*d
)
9003 unsigned int i
, odd
, mask
, nelt
= d
->nelt
;
9004 rtx out
, in0
, in1
, x
;
9005 rtx (*gen
) (rtx
, rtx
, rtx
);
9006 enum machine_mode vmode
= d
->vmode
;
9008 if (GET_MODE_UNIT_SIZE (vmode
) > 8)
9011 /* Note that these are little-endian tests.
9012 We correct for big-endian later. */
9013 if (d
->perm
[0] == 0)
9015 else if (d
->perm
[0] == 1)
9019 mask
= (d
->one_vector_p
? nelt
- 1 : 2 * nelt
- 1);
9021 for (i
= 0; i
< nelt
; i
+= 2)
9023 if (d
->perm
[i
] != i
+ odd
)
9025 if (d
->perm
[i
+ 1] != ((i
+ nelt
+ odd
) & mask
))
9035 if (BYTES_BIG_ENDIAN
)
9037 x
= in0
, in0
= in1
, in1
= x
;
9046 case V16QImode
: gen
= gen_aarch64_trn2v16qi
; break;
9047 case V8QImode
: gen
= gen_aarch64_trn2v8qi
; break;
9048 case V8HImode
: gen
= gen_aarch64_trn2v8hi
; break;
9049 case V4HImode
: gen
= gen_aarch64_trn2v4hi
; break;
9050 case V4SImode
: gen
= gen_aarch64_trn2v4si
; break;
9051 case V2SImode
: gen
= gen_aarch64_trn2v2si
; break;
9052 case V2DImode
: gen
= gen_aarch64_trn2v2di
; break;
9053 case V4SFmode
: gen
= gen_aarch64_trn2v4sf
; break;
9054 case V2SFmode
: gen
= gen_aarch64_trn2v2sf
; break;
9055 case V2DFmode
: gen
= gen_aarch64_trn2v2df
; break;
9064 case V16QImode
: gen
= gen_aarch64_trn1v16qi
; break;
9065 case V8QImode
: gen
= gen_aarch64_trn1v8qi
; break;
9066 case V8HImode
: gen
= gen_aarch64_trn1v8hi
; break;
9067 case V4HImode
: gen
= gen_aarch64_trn1v4hi
; break;
9068 case V4SImode
: gen
= gen_aarch64_trn1v4si
; break;
9069 case V2SImode
: gen
= gen_aarch64_trn1v2si
; break;
9070 case V2DImode
: gen
= gen_aarch64_trn1v2di
; break;
9071 case V4SFmode
: gen
= gen_aarch64_trn1v4sf
; break;
9072 case V2SFmode
: gen
= gen_aarch64_trn1v2sf
; break;
9073 case V2DFmode
: gen
= gen_aarch64_trn1v2df
; break;
9079 emit_insn (gen (out
, in0
, in1
));
9083 /* Recognize patterns suitable for the UZP instructions. */
9085 aarch64_evpc_uzp (struct expand_vec_perm_d
*d
)
9087 unsigned int i
, odd
, mask
, nelt
= d
->nelt
;
9088 rtx out
, in0
, in1
, x
;
9089 rtx (*gen
) (rtx
, rtx
, rtx
);
9090 enum machine_mode vmode
= d
->vmode
;
9092 if (GET_MODE_UNIT_SIZE (vmode
) > 8)
9095 /* Note that these are little-endian tests.
9096 We correct for big-endian later. */
9097 if (d
->perm
[0] == 0)
9099 else if (d
->perm
[0] == 1)
9103 mask
= (d
->one_vector_p
? nelt
- 1 : 2 * nelt
- 1);
9105 for (i
= 0; i
< nelt
; i
++)
9107 unsigned elt
= (i
* 2 + odd
) & mask
;
9108 if (d
->perm
[i
] != elt
)
9118 if (BYTES_BIG_ENDIAN
)
9120 x
= in0
, in0
= in1
, in1
= x
;
9129 case V16QImode
: gen
= gen_aarch64_uzp2v16qi
; break;
9130 case V8QImode
: gen
= gen_aarch64_uzp2v8qi
; break;
9131 case V8HImode
: gen
= gen_aarch64_uzp2v8hi
; break;
9132 case V4HImode
: gen
= gen_aarch64_uzp2v4hi
; break;
9133 case V4SImode
: gen
= gen_aarch64_uzp2v4si
; break;
9134 case V2SImode
: gen
= gen_aarch64_uzp2v2si
; break;
9135 case V2DImode
: gen
= gen_aarch64_uzp2v2di
; break;
9136 case V4SFmode
: gen
= gen_aarch64_uzp2v4sf
; break;
9137 case V2SFmode
: gen
= gen_aarch64_uzp2v2sf
; break;
9138 case V2DFmode
: gen
= gen_aarch64_uzp2v2df
; break;
9147 case V16QImode
: gen
= gen_aarch64_uzp1v16qi
; break;
9148 case V8QImode
: gen
= gen_aarch64_uzp1v8qi
; break;
9149 case V8HImode
: gen
= gen_aarch64_uzp1v8hi
; break;
9150 case V4HImode
: gen
= gen_aarch64_uzp1v4hi
; break;
9151 case V4SImode
: gen
= gen_aarch64_uzp1v4si
; break;
9152 case V2SImode
: gen
= gen_aarch64_uzp1v2si
; break;
9153 case V2DImode
: gen
= gen_aarch64_uzp1v2di
; break;
9154 case V4SFmode
: gen
= gen_aarch64_uzp1v4sf
; break;
9155 case V2SFmode
: gen
= gen_aarch64_uzp1v2sf
; break;
9156 case V2DFmode
: gen
= gen_aarch64_uzp1v2df
; break;
9162 emit_insn (gen (out
, in0
, in1
));
9166 /* Recognize patterns suitable for the ZIP instructions. */
9168 aarch64_evpc_zip (struct expand_vec_perm_d
*d
)
9170 unsigned int i
, high
, mask
, nelt
= d
->nelt
;
9171 rtx out
, in0
, in1
, x
;
9172 rtx (*gen
) (rtx
, rtx
, rtx
);
9173 enum machine_mode vmode
= d
->vmode
;
9175 if (GET_MODE_UNIT_SIZE (vmode
) > 8)
9178 /* Note that these are little-endian tests.
9179 We correct for big-endian later. */
9181 if (d
->perm
[0] == high
)
9184 else if (d
->perm
[0] == 0)
9188 mask
= (d
->one_vector_p
? nelt
- 1 : 2 * nelt
- 1);
9190 for (i
= 0; i
< nelt
/ 2; i
++)
9192 unsigned elt
= (i
+ high
) & mask
;
9193 if (d
->perm
[i
* 2] != elt
)
9195 elt
= (elt
+ nelt
) & mask
;
9196 if (d
->perm
[i
* 2 + 1] != elt
)
9206 if (BYTES_BIG_ENDIAN
)
9208 x
= in0
, in0
= in1
, in1
= x
;
9217 case V16QImode
: gen
= gen_aarch64_zip2v16qi
; break;
9218 case V8QImode
: gen
= gen_aarch64_zip2v8qi
; break;
9219 case V8HImode
: gen
= gen_aarch64_zip2v8hi
; break;
9220 case V4HImode
: gen
= gen_aarch64_zip2v4hi
; break;
9221 case V4SImode
: gen
= gen_aarch64_zip2v4si
; break;
9222 case V2SImode
: gen
= gen_aarch64_zip2v2si
; break;
9223 case V2DImode
: gen
= gen_aarch64_zip2v2di
; break;
9224 case V4SFmode
: gen
= gen_aarch64_zip2v4sf
; break;
9225 case V2SFmode
: gen
= gen_aarch64_zip2v2sf
; break;
9226 case V2DFmode
: gen
= gen_aarch64_zip2v2df
; break;
9235 case V16QImode
: gen
= gen_aarch64_zip1v16qi
; break;
9236 case V8QImode
: gen
= gen_aarch64_zip1v8qi
; break;
9237 case V8HImode
: gen
= gen_aarch64_zip1v8hi
; break;
9238 case V4HImode
: gen
= gen_aarch64_zip1v4hi
; break;
9239 case V4SImode
: gen
= gen_aarch64_zip1v4si
; break;
9240 case V2SImode
: gen
= gen_aarch64_zip1v2si
; break;
9241 case V2DImode
: gen
= gen_aarch64_zip1v2di
; break;
9242 case V4SFmode
: gen
= gen_aarch64_zip1v4sf
; break;
9243 case V2SFmode
: gen
= gen_aarch64_zip1v2sf
; break;
9244 case V2DFmode
: gen
= gen_aarch64_zip1v2df
; break;
9250 emit_insn (gen (out
, in0
, in1
));
9254 /* Recognize patterns for the EXT insn. */
9257 aarch64_evpc_ext (struct expand_vec_perm_d
*d
)
9259 unsigned int i
, nelt
= d
->nelt
;
9260 rtx (*gen
) (rtx
, rtx
, rtx
, rtx
);
9263 unsigned int location
= d
->perm
[0]; /* Always < nelt. */
9265 /* Check if the extracted indices are increasing by one. */
9266 for (i
= 1; i
< nelt
; i
++)
9268 unsigned int required
= location
+ i
;
9269 if (d
->one_vector_p
)
9271 /* We'll pass the same vector in twice, so allow indices to wrap. */
9272 required
&= (nelt
- 1);
9274 if (d
->perm
[i
] != required
)
9280 case V16QImode
: gen
= gen_aarch64_extv16qi
; break;
9281 case V8QImode
: gen
= gen_aarch64_extv8qi
; break;
9282 case V4HImode
: gen
= gen_aarch64_extv4hi
; break;
9283 case V8HImode
: gen
= gen_aarch64_extv8hi
; break;
9284 case V2SImode
: gen
= gen_aarch64_extv2si
; break;
9285 case V4SImode
: gen
= gen_aarch64_extv4si
; break;
9286 case V2SFmode
: gen
= gen_aarch64_extv2sf
; break;
9287 case V4SFmode
: gen
= gen_aarch64_extv4sf
; break;
9288 case V2DImode
: gen
= gen_aarch64_extv2di
; break;
9289 case V2DFmode
: gen
= gen_aarch64_extv2df
; break;
9298 /* The case where (location == 0) is a no-op for both big- and little-endian,
9299 and is removed by the mid-end at optimization levels -O1 and higher. */
9301 if (BYTES_BIG_ENDIAN
&& (location
!= 0))
9303 /* After setup, we want the high elements of the first vector (stored
9304 at the LSB end of the register), and the low elements of the second
9305 vector (stored at the MSB end of the register). So swap. */
9309 /* location != 0 (above), so safe to assume (nelt - location) < nelt. */
9310 location
= nelt
- location
;
9313 offset
= GEN_INT (location
);
9314 emit_insn (gen (d
->target
, d
->op0
, d
->op1
, offset
));
9318 /* Recognize patterns for the REV insns. */
9321 aarch64_evpc_rev (struct expand_vec_perm_d
*d
)
9323 unsigned int i
, j
, diff
, nelt
= d
->nelt
;
9324 rtx (*gen
) (rtx
, rtx
);
9326 if (!d
->one_vector_p
)
9335 case V16QImode
: gen
= gen_aarch64_rev64v16qi
; break;
9336 case V8QImode
: gen
= gen_aarch64_rev64v8qi
; break;
9344 case V16QImode
: gen
= gen_aarch64_rev32v16qi
; break;
9345 case V8QImode
: gen
= gen_aarch64_rev32v8qi
; break;
9346 case V8HImode
: gen
= gen_aarch64_rev64v8hi
; break;
9347 case V4HImode
: gen
= gen_aarch64_rev64v4hi
; break;
9355 case V16QImode
: gen
= gen_aarch64_rev16v16qi
; break;
9356 case V8QImode
: gen
= gen_aarch64_rev16v8qi
; break;
9357 case V8HImode
: gen
= gen_aarch64_rev32v8hi
; break;
9358 case V4HImode
: gen
= gen_aarch64_rev32v4hi
; break;
9359 case V4SImode
: gen
= gen_aarch64_rev64v4si
; break;
9360 case V2SImode
: gen
= gen_aarch64_rev64v2si
; break;
9361 case V4SFmode
: gen
= gen_aarch64_rev64v4sf
; break;
9362 case V2SFmode
: gen
= gen_aarch64_rev64v2sf
; break;
9371 for (i
= 0; i
< nelt
; i
+= diff
+ 1)
9372 for (j
= 0; j
<= diff
; j
+= 1)
9374 /* This is guaranteed to be true as the value of diff
9375 is 7, 3, 1 and we should have enough elements in the
9376 queue to generate this. Getting a vector mask with a
9377 value of diff other than these values implies that
9378 something is wrong by the time we get here. */
9379 gcc_assert (i
+ j
< nelt
);
9380 if (d
->perm
[i
+ j
] != i
+ diff
- j
)
9388 emit_insn (gen (d
->target
, d
->op0
));
9393 aarch64_evpc_dup (struct expand_vec_perm_d
*d
)
9395 rtx (*gen
) (rtx
, rtx
, rtx
);
9396 rtx out
= d
->target
;
9398 enum machine_mode vmode
= d
->vmode
;
9399 unsigned int i
, elt
, nelt
= d
->nelt
;
9403 for (i
= 1; i
< nelt
; i
++)
9405 if (elt
!= d
->perm
[i
])
9409 /* The generic preparation in aarch64_expand_vec_perm_const_1
9410 swaps the operand order and the permute indices if it finds
9411 d->perm[0] to be in the second operand. Thus, we can always
9412 use d->op0 and need not do any extra arithmetic to get the
9413 correct lane number. */
9415 lane
= GEN_INT (elt
); /* The pattern corrects for big-endian. */
9419 case V16QImode
: gen
= gen_aarch64_dup_lanev16qi
; break;
9420 case V8QImode
: gen
= gen_aarch64_dup_lanev8qi
; break;
9421 case V8HImode
: gen
= gen_aarch64_dup_lanev8hi
; break;
9422 case V4HImode
: gen
= gen_aarch64_dup_lanev4hi
; break;
9423 case V4SImode
: gen
= gen_aarch64_dup_lanev4si
; break;
9424 case V2SImode
: gen
= gen_aarch64_dup_lanev2si
; break;
9425 case V2DImode
: gen
= gen_aarch64_dup_lanev2di
; break;
9426 case V4SFmode
: gen
= gen_aarch64_dup_lanev4sf
; break;
9427 case V2SFmode
: gen
= gen_aarch64_dup_lanev2sf
; break;
9428 case V2DFmode
: gen
= gen_aarch64_dup_lanev2df
; break;
9433 emit_insn (gen (out
, in0
, lane
));
9438 aarch64_evpc_tbl (struct expand_vec_perm_d
*d
)
9440 rtx rperm
[MAX_VECT_LEN
], sel
;
9441 enum machine_mode vmode
= d
->vmode
;
9442 unsigned int i
, nelt
= d
->nelt
;
9447 /* Generic code will try constant permutation twice. Once with the
9448 original mode and again with the elements lowered to QImode.
9449 So wait and don't do the selector expansion ourselves. */
9450 if (vmode
!= V8QImode
&& vmode
!= V16QImode
)
9453 for (i
= 0; i
< nelt
; ++i
)
9455 int nunits
= GET_MODE_NUNITS (vmode
);
9457 /* If big-endian and two vectors we end up with a weird mixed-endian
9458 mode on NEON. Reverse the index within each word but not the word
9460 rperm
[i
] = GEN_INT (BYTES_BIG_ENDIAN
? d
->perm
[i
] ^ (nunits
- 1)
9463 sel
= gen_rtx_CONST_VECTOR (vmode
, gen_rtvec_v (nelt
, rperm
));
9464 sel
= force_reg (vmode
, sel
);
9466 aarch64_expand_vec_perm_1 (d
->target
, d
->op0
, d
->op1
, sel
);
static bool
aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  /* The pattern matching functions above are written to look for a small
     number to begin the sequence (0, 1, N/2).  If we begin with an index
     from the second operand, we can swap the operands.  */
  if (d->perm[0] >= d->nelt)
    {
      unsigned i, nelt = d->nelt;
      rtx x;

      gcc_assert (nelt == (nelt & -nelt));
      for (i = 0; i < nelt; ++i)
	d->perm[i] ^= nelt; /* Keep the same index, but in the other vector.  */

      x = d->op0;
      d->op0 = d->op1;
      d->op1 = x;
    }

  if (TARGET_SIMD)
    {
      if (aarch64_evpc_rev (d))
	return true;
      else if (aarch64_evpc_ext (d))
	return true;
      else if (aarch64_evpc_dup (d))
	return true;
      else if (aarch64_evpc_zip (d))
	return true;
      else if (aarch64_evpc_uzp (d))
	return true;
      else if (aarch64_evpc_trn (d))
	return true;
      return aarch64_evpc_tbl (d);
    }

  return false;
}
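/* For illustration: with nelt == 4 and a selector { 5, 6, 7, 4 },
   d->perm[0] >= nelt, so the code above rewrites the selector to
   { 1, 2, 3, 0 } and swaps d->op0 with d->op1 before trying the
   pattern matchers.  */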
/* Expand a vec_perm_const pattern.  */

bool
aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
{
  struct expand_vec_perm_d d;
  int i, nelt, which;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;

  d.vmode = GET_MODE (target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  for (i = which = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);
      which |= (ei < nelt ? 1 : 2);
      d.perm[i] = ei;
    }

  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      d.one_vector_p = false;
      if (!rtx_equal_p (op0, op1))
	break;

      /* The elements of PERM do not suggest that only the first operand
	 is used, but both operands are identical.  Allow easier matching
	 of the permutation by folding the permutation into the single
	 input vector.  */
      /* Fall Through.  */
    case 2:
      for (i = 0; i < nelt; ++i)
	d.perm[i] &= nelt - 1;
      d.op0 = op1;
      d.one_vector_p = true;
      break;

    case 1:
      d.op1 = op0;
      d.one_vector_p = true;
      break;
    }

  return aarch64_expand_vec_perm_const_1 (&d);
}
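/* For illustration: in the expander above, WHICH accumulates bit 0 when an
   index selects from op0 and bit 1 when it selects from op1, so which == 1
   or which == 2 means only one input is referenced while which == 3 means
   both are.  */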
static bool
aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
				     const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;
  memcpy (d.perm, sel, nelt);

  /* Calculate whether all elements are in one vector.  */
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* If all elements are from the second vector, reindex as if from the
     first vector.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to a single vector.  */
  d.one_vector_p = (which != 3);

  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_vector_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = aarch64_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
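/* For illustration: with nelt == 4 and sel = { 4, 5, 6, 7 }, WHICH ends up
   as 2, so the loop above reindexes the selector to { 0, 1, 2, 3 } and the
   permutation is then tested as a single-vector operation.  */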
/* Implement target hook CANNOT_CHANGE_MODE_CLASS.  */

bool
aarch64_cannot_change_mode_class (enum machine_mode from,
				  enum machine_mode to,
				  enum reg_class rclass)
{
  /* Full-reg subregs are allowed on general regs or any class if they are
     the same size.  */
  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
      || !reg_classes_intersect_p (FP_REGS, rclass))
    return false;

  /* Limited combinations of subregs are safe on FPREGs.  Particularly,
     1. Vector Mode to Scalar mode where 1 unit of the vector is accessed.
     2. Scalar to Scalar for integer modes or same size float modes.
     3. Vector to Vector modes.
     4. On little-endian only, Vector-Structure to Vector modes.  */
  if (GET_MODE_SIZE (from) > GET_MODE_SIZE (to))
    {
      if (aarch64_vector_mode_supported_p (from)
	  && GET_MODE_SIZE (GET_MODE_INNER (from)) == GET_MODE_SIZE (to))
	return false;

      if (GET_MODE_NUNITS (from) == 1
	  && GET_MODE_NUNITS (to) == 1
	  && (GET_MODE_CLASS (from) == MODE_INT
	      || from == to))
	return false;

      if (aarch64_vector_mode_supported_p (from)
	  && aarch64_vector_mode_supported_p (to))
	return false;

      /* Within a vector structure straddling multiple vector registers
	 we are in a mixed-endian representation.  As such, we can't
	 easily change modes for BYTES_BIG_ENDIAN.  Otherwise, we can
	 switch between vectors and vector structures cheaply.  */
      if (!BYTES_BIG_ENDIAN)
	if ((aarch64_vector_mode_supported_p (from)
	     && aarch64_vect_struct_mode_p (to))
	    || (aarch64_vector_mode_supported_p (to)
		&& aarch64_vect_struct_mode_p (from)))
	  return false;
    }

  return true;
}
/* Implement MODES_TIEABLE_P.  */

bool
aarch64_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2))
    return true;

  /* We specifically want to allow elements of "structure" modes to
     be tieable to the structure.  This more general condition allows
     other rarer situations too.  */
  if (TARGET_SIMD
      && aarch64_vector_mode_p (mode1)
      && aarch64_vector_mode_p (mode2))
    return true;

  return false;
}
/* Return a new RTX holding the result of moving POINTER forward by
   AMOUNT bytes.  */

static rtx
aarch64_move_pointer (rtx pointer, int amount)
{
  rtx next = plus_constant (Pmode, XEXP (pointer, 0), amount);

  return adjust_automodify_address (pointer, GET_MODE (pointer),
				    next, amount);
}

/* Return a new RTX holding the result of moving POINTER forward by the
   size of the mode it points to.  */

static rtx
aarch64_progress_pointer (rtx pointer)
{
  HOST_WIDE_INT amount = GET_MODE_SIZE (GET_MODE (pointer));

  return aarch64_move_pointer (pointer, amount);
}
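/* For illustration: given an SImode MEM, aarch64_progress_pointer returns
   the same MEM advanced by 4 bytes, since GET_MODE_SIZE (SImode) is 4.  */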
/* Copy one MODE sized block from SRC to DST, then progress SRC and DST by
   MODE bytes.  */

static void
aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst,
					      enum machine_mode mode)
{
  rtx reg = gen_reg_rtx (mode);

  /* "Cast" the pointers to the correct mode.  */
  *src = adjust_address (*src, mode, 0);
  *dst = adjust_address (*dst, mode, 0);
  /* Emit the memcpy.  */
  emit_move_insn (reg, *src);
  emit_move_insn (*dst, reg);
  /* Move the pointers forward.  */
  *src = aarch64_progress_pointer (*src);
  *dst = aarch64_progress_pointer (*dst);
}
/* Expand movmem, as if from a __builtin_memcpy.  Return true if
   we succeed, otherwise return false.  */

bool
aarch64_expand_movmem (rtx *operands)
{
  unsigned int n;
  rtx dst = operands[0];
  rtx src = operands[1];
  rtx base;
  bool speed_p = !optimize_function_for_size_p (cfun);

  /* When optimizing for size, give a better estimate of the length of a
     memcpy call, but use the default otherwise.  */
  unsigned int max_instructions = (speed_p ? 15 : AARCH64_CALL_RATIO) / 2;

  /* We can't do anything smart if the amount to copy is not constant.  */
  if (!CONST_INT_P (operands[2]))
    return false;

  n = UINTVAL (operands[2]);

  /* Try to keep the number of instructions low.  For cases below 16 bytes we
     need to make at most two moves.  For cases above 16 bytes it will be one
     move for each 16 byte chunk, then at most two additional moves.  */
  if (((n / 16) + (n % 16 ? 2 : 0)) > max_instructions)
    return false;
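  /* For illustration: when optimizing for speed, max_instructions is
     15 / 2 = 7, so e.g. n == 35 passes the check above (35 / 16 + 2 == 4)
     while n == 100 is rejected (100 / 16 + 2 == 8) and the copy falls back
     to a library memcpy call.  */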
  base = copy_to_mode_reg (Pmode, XEXP (dst, 0));
  dst = adjust_automodify_address (dst, VOIDmode, base, 0);

  base = copy_to_mode_reg (Pmode, XEXP (src, 0));
  src = adjust_automodify_address (src, VOIDmode, base, 0);
  /* Simple cases.  Copy 0-3 bytes, as (if applicable) a 2-byte, then a
     1-byte chunk.  */
  if (n < 4)
    {
      if (n >= 2)
	{
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
	  n -= 2;
	}

      if (n == 1)
	aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);

      return true;
    }
  /* Copy 4-8 bytes.  First a 4-byte chunk, then (if applicable) a second
     4-byte chunk, partially overlapping with the previously copied chunk.  */
  if (n < 8)
    {
      aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
      n -= 4;
      if (n > 0)
	{
	  int move = n - 4;

	  src = aarch64_move_pointer (src, move);
	  dst = aarch64_move_pointer (dst, move);
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
	}
      return true;
    }
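  /* For illustration: for n == 6, the first SImode move copies bytes 0-3;
     n becomes 2 and move is -2, so the pointers step back to offset 2 and
     the second SImode move copies bytes 2-5, overlapping the first chunk
     by two bytes.  */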
  /* Copy more than 8 bytes.  Copy chunks of 16 bytes until we run out of
     them, then (if applicable) an 8-byte chunk.  */
  while (n >= 8)
    {
      if (n / 16)
	{
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, TImode);
	  n -= 16;
	}
      else
	{
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
	  n -= 8;
	}
    }
  /* Finish the final bytes of the copy.  We can always do this in one
     instruction.  We either copy the exact amount we need, or partially
     overlap with the previous chunk we copied and copy 8-bytes.  */
  if (n == 0)
    return true;
  else if (n == 1)
    aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);
  else if (n == 2)
    aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
  else if (n == 4)
    aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
  else
    {
      if (n == 3)
	{
	  src = aarch64_move_pointer (src, -1);
	  dst = aarch64_move_pointer (dst, -1);
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
	}
      else
	{
	  int move = n - 8;

	  src = aarch64_move_pointer (src, move);
	  dst = aarch64_move_pointer (dst, move);
	  aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
	}
    }

  return true;
}
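/* For illustration: a 35-byte copy expands above as two TImode (16-byte)
   moves covering bytes 0-31, leaving n == 3; the pointers are then backed
   up one byte and a final SImode move copies bytes 31-34, overlapping one
   already-copied byte.  */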
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST aarch64_address_cost

/* This hook will determine whether unnamed bitfields affect the alignment
   of the containing structure.  The hook returns true if the structure
   should inherit the alignment requirements of an unnamed bitfield's
   type.  */
#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
  hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aarch64_start_file

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list

#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE aarch64_can_eliminate

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
/* Only the least significant bit is used for initialization guard
   variables.  */
#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix

#ifdef TARGET_BIG_ENDIAN_DEFAULT
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
#endif
#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL aarch64_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN aarch64_fold_builtin

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG aarch64_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance

#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE aarch64_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required

#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS aarch64_init_builtins

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
#undef TARGET_LRA_P
#define TARGET_LRA_P aarch64_lra_p
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE aarch64_mangle_type

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
/* This target hook should return true if accesses to volatile bitfields
   should use the narrowest mode possible.  It should return false if these
   accesses should use the bitfield container type.  */
#undef TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE aarch64_override_options

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  aarch64_override_options_after_change

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB aarch64_return_in_msb

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS aarch64_rtx_costs_wrapper

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p

#undef TARGET_ARRAY_MODE_SUPPORTED_P
#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p

#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  aarch64_builtin_vectorization_cost

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode

#undef TARGET_VECTORIZE_BUILTINS
#define TARGET_VECTORIZE_BUILTINS

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  aarch64_builtin_vectorized_function

#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  aarch64_autovectorize_vector_sizes

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV \
  aarch64_atomic_assign_expand_fenv

/* Section anchor support.  */

#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -256

/* Limit the maximum anchor offset to 4k-1, since that's the limit for a
   byte offset; we can do much more for larger data types, but have no way
   to determine the size of the access.  We assume accesses are aligned.  */
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 4095
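/* For illustration: 4095 is the largest offset expressible in the unsigned
   12-bit immediate field of a byte load/store such as
   "ldrb w0, [x0, #4095]"; wider accesses could reach further since the
   immediate is scaled by the access size, but the size is unknown here.  */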
#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment

#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
  aarch64_simd_vector_alignment_reachable

/* vec_perm support.  */

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
  aarch64_vectorize_vec_perm_const_ok

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs

#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REGNUM

#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-aarch64.h"