/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


#ifndef GCC_AARCH64_PROTOS_H
#define GCC_AARCH64_PROTOS_H
/* Context in which a symbol reference appears:

   SYMBOL_CONTEXT_ADR
       The symbol is used in a load-address operation.
   SYMBOL_CONTEXT_MEM
       The symbol is used as the address in a MEM.  */
enum aarch64_symbol_context
{
  SYMBOL_CONTEXT_MEM,
  SYMBOL_CONTEXT_ADR
};
/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
   high and lo relocs that calculate the base address using a PC
   relative reloc.
   So to get the address of foo, we generate
   adrp x0, foo
   add  x0, x0, :lo12:foo

   To load or store something to foo, we could use the corresponding
   load store variants that generate an
   ldr x0, [x0,:lo12:foo]
   or
   str x1, [x0, :lo12:foo]

   This corresponds to the small code model of the compiler.

   SYMBOL_SMALL_GOT: Similar to the one above but this
   gives us the GOT entry of the symbol being referred to :
   Thus calculating the GOT entry for foo is done using the
   following sequence of instructions.  The ADRP instruction
   gets us to the page containing the GOT entry of the symbol
   and the got_lo12 gets us the actual offset in it.

   adrp  x0, :got:foo
   ldr   x0, [x0, :gotoff_lo12:foo]

   This corresponds to the small PIC model of the compiler.

   SYMBOL_SMALL_TLSGD
   SYMBOL_SMALL_TLSDESC
   SYMBOL_SMALL_GOTTPREL
   SYMBOL_SMALL_TPREL
   Each of these represents a thread-local symbol, and corresponds to the
   thread local storage relocation operator for the symbol being referred to.

   SYMBOL_TINY_ABSOLUTE

   Generate symbol accesses as a PC relative address using a single
   instruction.  To compute the address of symbol foo, we generate:

   ADR x0, foo

   SYMBOL_TINY_GOT

   Generate symbol accesses via the GOT using a single PC relative
   instruction.  To compute the address of symbol foo, we generate:

   ldr t0, :got:foo

   The value of foo can subsequently read using:

   ldrb t0, [t0]

   SYMBOL_FORCE_TO_MEM : Global variables are addressed using
   constant pool.  All variable addresses are spilled into constant
   pools.  The constant pools themselves are addressed using PC
   relative accesses.  This only works for the large code model.  */
enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_GOTTPREL,
  SYMBOL_SMALL_TPREL,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_FORCE_TO_MEM
};
/* A set of tuning parameters contains references to size and time
   cost models and vectors for address cost calculations, register
   move costs and memory move costs.  */

/* Scaled addressing modes can vary cost depending on the mode of the
   value to be loaded/stored.  QImode values cannot use scaled
   addressing modes.  */

struct scale_addr_mode_cost
{
  const int hi;	/* Cost when the loaded/stored value has HImode.  */
  const int si;	/* ... SImode.  */
  const int di;	/* ... DImode.  */
  const int ti;	/* ... TImode.  */
};
122 | ||
43e9d192 IB |
123 | /* Additional cost for addresses. */ |
124 | struct cpu_addrcost_table | |
125 | { | |
67747367 | 126 | const struct scale_addr_mode_cost addr_scale_costs; |
43e9d192 IB |
127 | const int pre_modify; |
128 | const int post_modify; | |
129 | const int register_offset; | |
130 | const int register_extend; | |
131 | const int imm_offset; | |
132 | }; | |

/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;	/* General-purpose to general-purpose move.  */
  const int GP2FP;	/* General-purpose to FP/SIMD move.  */
  const int FP2GP;	/* FP/SIMD to general-purpose move.  */
  const int FP2FP;	/* FP/SIMD to FP/SIMD move.  */
};

/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  const int scalar_stmt_cost;		 /* Cost of any scalar operation,
					    excluding load and store.  */
  const int scalar_load_cost;		 /* Cost of scalar load.  */
  const int scalar_store_cost;		 /* Cost of scalar store.  */
  const int vec_stmt_cost;		 /* Cost of any vector operation,
					    excluding load, store,
					    vector-to-scalar and
					    scalar-to-vector operation.  */
  const int vec_to_scalar_cost;		 /* Cost of vec-to-scalar operation.  */
  const int scalar_to_vec_cost;		 /* Cost of scalar-to-vector
					    operation.  */
  const int vec_align_load_cost;	 /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost;	 /* Cost of unaligned vector load.  */
  const int vec_unalign_store_cost;	 /* Cost of unaligned vector store.  */
  const int vec_store_cost;		 /* Cost of vector store.  */
  const int cond_taken_branch_cost;	 /* Cost of taken branch.  */
  const int cond_not_taken_branch_cost;  /* Cost of not taken branch.  */
};

/* Per-CPU tuning parameters: cost tables plus scheduling, alignment
   and reassociation-width knobs.  */
struct tune_params
{
  const struct cpu_cost_table *const insn_extra_cost;
  const struct cpu_addrcost_table *const addr_cost;
  const struct cpu_regmove_cost *const regmove_cost;
  const struct cpu_vector_cost *const vec_costs;
  const int memmov_cost;	/* Cost of a memory-to-memory move.  */
  const int issue_rate;		/* Insns issued per cycle.  */
  const unsigned int fuseable_ops;  /* Bitmask of fuseable insn pairs.  */
  const int function_align;	/* Function start alignment.  */
  const int jump_align;		/* Jump target alignment.  */
  const int loop_align;		/* Loop start alignment.  */
  const int int_reassoc_width;	/* Reassociation width, integer ops.  */
  const int fp_reassoc_width;	/* Reassociation width, FP ops.  */
  const int vec_reassoc_width;	/* Reassociation width, vector ops.  */
};
181 | ||
182 | HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned); | |
cd5660ab | 183 | int aarch64_get_condition_code (rtx); |
ef4bddc2 RS |
184 | bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode); |
185 | bool aarch64_cannot_change_mode_class (machine_mode, | |
186 | machine_mode, | |
69675d50 | 187 | enum reg_class); |
da4f13a4 MS |
188 | enum aarch64_symbol_type |
189 | aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context); | |
ddeabd3e | 190 | bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT); |
43e9d192 | 191 | bool aarch64_constant_address_p (rtx); |
e2c75eea | 192 | bool aarch64_expand_movmem (rtx *); |
3520f7cc | 193 | bool aarch64_float_const_zero_rtx_p (rtx); |
43e9d192 IB |
194 | bool aarch64_function_arg_regno_p (unsigned); |
195 | bool aarch64_gen_movmemqi (rtx *); | |
0ac198d3 | 196 | bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *); |
ef4bddc2 | 197 | bool aarch64_is_extend_from_extract (machine_mode, rtx, rtx); |
43e9d192 IB |
198 | bool aarch64_is_long_call_p (rtx); |
199 | bool aarch64_label_mentioned_p (rtx); | |
200 | bool aarch64_legitimate_pic_operand_p (rtx); | |
ef4bddc2 RS |
201 | bool aarch64_modes_tieable_p (machine_mode mode1, |
202 | machine_mode mode2); | |
203 | bool aarch64_move_imm (HOST_WIDE_INT, machine_mode); | |
83f8c414 | 204 | bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context, |
ef4bddc2 | 205 | machine_mode); |
668046d1 DS |
206 | int aarch64_simd_attr_length_rglist (enum machine_mode); |
207 | rtx aarch64_reverse_mask (enum machine_mode); | |
ef4bddc2 RS |
208 | bool aarch64_offset_7bit_signed_scaled_p (machine_mode, HOST_WIDE_INT); |
209 | char *aarch64_output_scalar_simd_mov_immediate (rtx, machine_mode); | |
210 | char *aarch64_output_simd_mov_immediate (rtx, machine_mode, unsigned); | |
211 | bool aarch64_pad_arg_upward (machine_mode, const_tree); | |
212 | bool aarch64_pad_reg_upward (machine_mode, const_tree, bool); | |
43e9d192 IB |
213 | bool aarch64_regno_ok_for_base_p (int, bool); |
214 | bool aarch64_regno_ok_for_index_p (int, bool); | |
ef4bddc2 | 215 | bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode, |
988fa693 | 216 | bool high); |
ef4bddc2 RS |
217 | bool aarch64_simd_imm_scalar_p (rtx x, machine_mode mode); |
218 | bool aarch64_simd_imm_zero_p (rtx, machine_mode); | |
219 | bool aarch64_simd_scalar_immediate_valid_for_move (rtx, machine_mode); | |
220 | bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool); | |
221 | bool aarch64_simd_valid_immediate (rtx, machine_mode, bool, | |
48063b9d | 222 | struct simd_immediate_info *); |
43e9d192 | 223 | bool aarch64_symbolic_address_p (rtx); |
43e9d192 | 224 | bool aarch64_uimm12_shift (HOST_WIDE_INT); |
4f942779 | 225 | bool aarch64_use_return_insn_p (void); |
f9d53c27 | 226 | const char *aarch64_mangle_builtin_type (const_tree); |
43e9d192 | 227 | const char *aarch64_output_casesi (rtx *); |
682287fb JG |
228 | const char *aarch64_rewrite_selected_cpu (const char *name); |
229 | ||
f8b756b7 | 230 | enum aarch64_symbol_type aarch64_classify_symbol (rtx, rtx, |
43e9d192 IB |
231 | enum aarch64_symbol_context); |
232 | enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx); | |
233 | enum reg_class aarch64_regno_regclass (unsigned); | |
234 | int aarch64_asm_preferred_eh_data_format (int, int); | |
ef4bddc2 RS |
235 | machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned, |
236 | machine_mode); | |
237 | int aarch64_hard_regno_mode_ok (unsigned, machine_mode); | |
238 | int aarch64_hard_regno_nregs (unsigned, machine_mode); | |
647d790d | 239 | int aarch64_simd_attr_length_move (rtx_insn *); |
43e9d192 IB |
240 | int aarch64_uxt_size (int, HOST_WIDE_INT); |
241 | rtx aarch64_final_eh_return_addr (void); | |
ef4bddc2 | 242 | rtx aarch64_legitimize_reload_address (rtx *, machine_mode, int, int, int); |
43e9d192 IB |
243 | const char *aarch64_output_move_struct (rtx *operands); |
244 | rtx aarch64_return_addr (int, rtx); | |
ef4bddc2 | 245 | rtx aarch64_simd_gen_const_vector_dup (machine_mode, int); |
43e9d192 | 246 | bool aarch64_simd_mem_operand_p (rtx); |
ef4bddc2 | 247 | rtx aarch64_simd_vect_par_cnst_half (machine_mode, bool); |
43e9d192 | 248 | rtx aarch64_tls_get_addr (void); |
9697e620 | 249 | tree aarch64_fold_builtin (tree, int, tree *, bool); |
43e9d192 IB |
250 | unsigned aarch64_dbx_register_number (unsigned); |
251 | unsigned aarch64_trampoline_size (void); | |
43e9d192 IB |
252 | void aarch64_asm_output_labelref (FILE *, const char *); |
253 | void aarch64_elf_asm_named_section (const char *, unsigned, tree); | |
254 | void aarch64_expand_epilogue (bool); | |
255 | void aarch64_expand_mov_immediate (rtx, rtx); | |
256 | void aarch64_expand_prologue (void); | |
4369c11e | 257 | void aarch64_expand_vector_init (rtx, rtx); |
43e9d192 IB |
258 | void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx, |
259 | const_tree, unsigned); | |
260 | void aarch64_init_expanders (void); | |
261 | void aarch64_print_operand (FILE *, rtx, char); | |
262 | void aarch64_print_operand_address (FILE *, rtx); | |
d07a3fed | 263 | void aarch64_emit_call_insn (rtx); |
43e9d192 IB |
264 | |
265 | /* Initialize builtins for SIMD intrinsics. */ | |
266 | void init_aarch64_simd_builtins (void); | |
267 | ||
2d8c6dc1 | 268 | void aarch64_simd_emit_reg_reg_move (rtx *, enum machine_mode, unsigned int); |
43e9d192 IB |
269 | |
270 | /* Emit code to place a AdvSIMD pair result in memory locations (with equal | |
271 | registers). */ | |
ef4bddc2 | 272 | void aarch64_simd_emit_pair_result_insn (machine_mode, |
43e9d192 IB |
273 | rtx (*intfn) (rtx, rtx, rtx), rtx, |
274 | rtx); | |
275 | ||
276 | /* Expand builtins for SIMD intrinsics. */ | |
277 | rtx aarch64_simd_expand_builtin (int, tree, rtx); | |
278 | ||
46ed6024 | 279 | void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree); |
43e9d192 | 280 | |
43e9d192 IB |
281 | void aarch64_split_128bit_move (rtx, rtx); |
282 | ||
283 | bool aarch64_split_128bit_move_p (rtx, rtx); | |
284 | ||
8b033a8a SN |
285 | void aarch64_split_simd_combine (rtx, rtx, rtx); |
286 | ||
fd4842cd SN |
287 | void aarch64_split_simd_move (rtx, rtx); |
288 | ||
3520f7cc JG |
289 | /* Check for a legitimate floating point constant for FMOV. */ |
290 | bool aarch64_float_const_representable_p (rtx); | |
291 | ||
43e9d192 IB |
292 | #if defined (RTX_CODE) |
293 | ||
ef4bddc2 RS |
294 | bool aarch64_legitimate_address_p (machine_mode, rtx, RTX_CODE, bool); |
295 | machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx); | |
43e9d192 | 296 | rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx); |
a5bc806c | 297 | rtx aarch64_load_tp (rtx); |
43e9d192 | 298 | |
0462169c SN |
299 | void aarch64_expand_compare_and_swap (rtx op[]); |
300 | void aarch64_split_compare_and_swap (rtx op[]); | |
301 | void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx); | |
302 | ||
350013bc | 303 | bool aarch64_gen_adjusted_ldpstp (rtx *, bool, enum machine_mode, RTX_CODE); |
43e9d192 IB |
304 | #endif /* RTX_CODE */ |
305 | ||
342be7f7 JG |
306 | void aarch64_init_builtins (void); |
307 | rtx aarch64_expand_builtin (tree exp, | |
308 | rtx target, | |
309 | rtx subtarget ATTRIBUTE_UNUSED, | |
ef4bddc2 | 310 | machine_mode mode ATTRIBUTE_UNUSED, |
342be7f7 | 311 | int ignore ATTRIBUTE_UNUSED); |
119103ca | 312 | tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED); |
342be7f7 | 313 | |
42fc9a7f JG |
314 | tree |
315 | aarch64_builtin_vectorized_function (tree fndecl, | |
316 | tree type_out, | |
317 | tree type_in); | |
318 | ||
88b08073 JG |
319 | extern void aarch64_split_combinev16qi (rtx operands[3]); |
320 | extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel); | |
75cf1494 KT |
321 | extern bool aarch64_madd_needs_nop (rtx_insn *); |
322 | extern void aarch64_final_prescan_insn (rtx_insn *); | |
88b08073 JG |
323 | extern bool |
324 | aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel); | |
aa87aced | 325 | void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *); |
cf670503 | 326 | int aarch64_ccmp_mode_to_code (enum machine_mode mode); |
350013bc BC |
327 | |
328 | bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset); | |
329 | bool aarch64_operands_ok_for_ldpstp (rtx *, bool, enum machine_mode); | |
330 | bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, enum machine_mode); | |
43e9d192 | 331 | #endif /* GCC_AARCH64_PROTOS_H */ |