]>
Commit | Line | Data |
---|---|---|
43e9d192 | 1 | /* Machine description for AArch64 architecture. |
d1e082c2 | 2 | Copyright (C) 2009-2013 Free Software Foundation, Inc. |
43e9d192 IB |
3 | Contributed by ARM Ltd. |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it | |
8 | under the terms of the GNU General Public License as published by | |
9 | the Free Software Foundation; either version 3, or (at your option) | |
10 | any later version. | |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but | |
13 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with GCC; see the file COPYING3. If not see | |
19 | <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
24 | #include "tm.h" | |
25 | #include "insn-codes.h" | |
26 | #include "rtl.h" | |
27 | #include "insn-attr.h" | |
28 | #include "tree.h" | |
29 | #include "regs.h" | |
30 | #include "df.h" | |
31 | #include "hard-reg-set.h" | |
32 | #include "output.h" | |
33 | #include "expr.h" | |
34 | #include "reload.h" | |
35 | #include "toplev.h" | |
36 | #include "target.h" | |
37 | #include "target-def.h" | |
38 | #include "targhooks.h" | |
39 | #include "ggc.h" | |
40 | #include "function.h" | |
41 | #include "tm_p.h" | |
42 | #include "recog.h" | |
43 | #include "langhooks.h" | |
44 | #include "diagnostic-core.h" | |
45 | #include "gimple.h" | |
46 | #include "optabs.h" | |
47 | #include "dwarf2.h" | |
48 | ||
49 | /* Classifies an address. | |
50 | ||
51 | ADDRESS_REG_IMM | |
52 | A simple base register plus immediate offset. | |
53 | ||
54 | ADDRESS_REG_WB | |
55 | A base register indexed by immediate offset with writeback. | |
56 | ||
57 | ADDRESS_REG_REG | |
58 | A base register indexed by (optionally scaled) register. | |
59 | ||
60 | ADDRESS_REG_UXTW | |
61 | A base register indexed by (optionally scaled) zero-extended register. | |
62 | ||
63 | ADDRESS_REG_SXTW | |
64 | A base register indexed by (optionally scaled) sign-extended register. | |
65 | ||
66 | ADDRESS_LO_SUM | |
67 | A LO_SUM rtx with a base register and "LO12" symbol relocation. | |
68 | ||
69 | ADDRESS_SYMBOLIC: | |
70 | A constant symbolic address, in pc-relative literal pool. */ | |
71 | ||
72 | enum aarch64_address_type { | |
73 | ADDRESS_REG_IMM, | |
74 | ADDRESS_REG_WB, | |
75 | ADDRESS_REG_REG, | |
76 | ADDRESS_REG_UXTW, | |
77 | ADDRESS_REG_SXTW, | |
78 | ADDRESS_LO_SUM, | |
79 | ADDRESS_SYMBOLIC | |
80 | }; | |
81 | ||
82 | struct aarch64_address_info { | |
83 | enum aarch64_address_type type; | |
84 | rtx base; | |
85 | rtx offset; | |
86 | int shift; | |
87 | enum aarch64_symbol_type symbol_type; | |
88 | }; | |
89 | ||
90 | /* The current code model. */ | |
91 | enum aarch64_code_model aarch64_cmodel; | |
92 | ||
93 | #ifdef HAVE_AS_TLS | |
94 | #undef TARGET_HAVE_TLS | |
95 | #define TARGET_HAVE_TLS 1 | |
96 | #endif | |
97 | ||
98 | static bool aarch64_composite_type_p (const_tree, enum machine_mode); | |
99 | static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode, | |
100 | const_tree, | |
101 | enum machine_mode *, int *, | |
102 | bool *); | |
103 | static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED; | |
104 | static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED; | |
43e9d192 IB |
105 | static void aarch64_override_options_after_change (void); |
106 | static int aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *, | |
107 | int *, unsigned char *, int *, int *); | |
108 | static bool aarch64_vector_mode_supported_p (enum machine_mode); | |
109 | static unsigned bit_count (unsigned HOST_WIDE_INT); | |
110 | static bool aarch64_const_vec_all_same_int_p (rtx, | |
111 | HOST_WIDE_INT, HOST_WIDE_INT); | |
112 | ||
88b08073 JG |
113 | static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode, |
114 | const unsigned char *sel); | |
115 | ||
43e9d192 IB |
116 | /* The processor for which instructions should be scheduled. */ |
117 | enum aarch64_processor aarch64_tune = generic; | |
118 | ||
119 | /* The current tuning set. */ | |
120 | const struct tune_params *aarch64_tune_params; | |
121 | ||
122 | /* Mask to specify which instructions we are allowed to generate. */ | |
123 | unsigned long aarch64_isa_flags = 0; | |
124 | ||
125 | /* Mask to specify which instruction scheduling options should be used. */ | |
126 | unsigned long aarch64_tune_flags = 0; | |
127 | ||
128 | /* Tuning parameters. */ | |
129 | ||
130 | #if HAVE_DESIGNATED_INITIALIZERS | |
131 | #define NAMED_PARAM(NAME, VAL) .NAME = (VAL) | |
132 | #else | |
133 | #define NAMED_PARAM(NAME, VAL) (VAL) | |
134 | #endif | |
135 | ||
136 | #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 | |
137 | __extension__ | |
138 | #endif | |
139 | static const struct cpu_rtx_cost_table generic_rtx_cost_table = | |
140 | { | |
141 | NAMED_PARAM (memory_load, COSTS_N_INSNS (1)), | |
142 | NAMED_PARAM (memory_store, COSTS_N_INSNS (0)), | |
143 | NAMED_PARAM (register_shift, COSTS_N_INSNS (1)), | |
144 | NAMED_PARAM (int_divide, COSTS_N_INSNS (6)), | |
145 | NAMED_PARAM (float_divide, COSTS_N_INSNS (2)), | |
146 | NAMED_PARAM (double_divide, COSTS_N_INSNS (6)), | |
147 | NAMED_PARAM (int_multiply, COSTS_N_INSNS (1)), | |
148 | NAMED_PARAM (int_multiply_extend, COSTS_N_INSNS (1)), | |
149 | NAMED_PARAM (int_multiply_add, COSTS_N_INSNS (1)), | |
150 | NAMED_PARAM (int_multiply_extend_add, COSTS_N_INSNS (1)), | |
151 | NAMED_PARAM (float_multiply, COSTS_N_INSNS (0)), | |
152 | NAMED_PARAM (double_multiply, COSTS_N_INSNS (1)) | |
153 | }; | |
154 | ||
155 | #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 | |
156 | __extension__ | |
157 | #endif | |
158 | static const struct cpu_addrcost_table generic_addrcost_table = | |
159 | { | |
160 | NAMED_PARAM (pre_modify, 0), | |
161 | NAMED_PARAM (post_modify, 0), | |
162 | NAMED_PARAM (register_offset, 0), | |
163 | NAMED_PARAM (register_extend, 0), | |
164 | NAMED_PARAM (imm_offset, 0) | |
165 | }; | |
166 | ||
167 | #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 | |
168 | __extension__ | |
169 | #endif | |
170 | static const struct cpu_regmove_cost generic_regmove_cost = | |
171 | { | |
172 | NAMED_PARAM (GP2GP, 1), | |
173 | NAMED_PARAM (GP2FP, 2), | |
174 | NAMED_PARAM (FP2GP, 2), | |
175 | /* We currently do not provide direct support for TFmode Q->Q move. | |
176 | Therefore we need to raise the cost above 2 in order to have | |
177 | reload handle the situation. */ | |
178 | NAMED_PARAM (FP2FP, 4) | |
179 | }; | |
180 | ||
181 | #if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 | |
182 | __extension__ | |
183 | #endif | |
184 | static const struct tune_params generic_tunings = | |
185 | { | |
186 | &generic_rtx_cost_table, | |
187 | &generic_addrcost_table, | |
188 | &generic_regmove_cost, | |
189 | NAMED_PARAM (memmov_cost, 4) | |
190 | }; | |
191 | ||
192 | /* A processor implementing AArch64. */ | |
193 | struct processor | |
194 | { | |
195 | const char *const name; | |
196 | enum aarch64_processor core; | |
197 | const char *arch; | |
198 | const unsigned long flags; | |
199 | const struct tune_params *const tune; | |
200 | }; | |
201 | ||
202 | /* Processor cores implementing AArch64. */ | |
203 | static const struct processor all_cores[] = | |
204 | { | |
205 | #define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \ | |
206 | {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings}, | |
207 | #include "aarch64-cores.def" | |
208 | #undef AARCH64_CORE | |
209 | {"generic", generic, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings}, | |
210 | {NULL, aarch64_none, NULL, 0, NULL} | |
211 | }; | |
212 | ||
213 | /* Architectures implementing AArch64. */ | |
214 | static const struct processor all_architectures[] = | |
215 | { | |
216 | #define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \ | |
217 | {NAME, CORE, #ARCH, FLAGS, NULL}, | |
218 | #include "aarch64-arches.def" | |
219 | #undef AARCH64_ARCH | |
220 | {"generic", generic, "8", AARCH64_FL_FOR_ARCH8, NULL}, | |
221 | {NULL, aarch64_none, NULL, 0, NULL} | |
222 | }; | |
223 | ||
224 | /* Target specification. These are populated as commandline arguments | |
225 | are processed, or NULL if not specified. */ | |
226 | static const struct processor *selected_arch; | |
227 | static const struct processor *selected_cpu; | |
228 | static const struct processor *selected_tune; | |
229 | ||
230 | #define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0) | |
231 | ||
232 | /* An ISA extension in the co-processor and main instruction set space. */ | |
233 | struct aarch64_option_extension | |
234 | { | |
235 | const char *const name; | |
236 | const unsigned long flags_on; | |
237 | const unsigned long flags_off; | |
238 | }; | |
239 | ||
240 | /* ISA extensions in AArch64. */ | |
241 | static const struct aarch64_option_extension all_extensions[] = | |
242 | { | |
243 | #define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \ | |
244 | {NAME, FLAGS_ON, FLAGS_OFF}, | |
245 | #include "aarch64-option-extensions.def" | |
246 | #undef AARCH64_OPT_EXTENSION | |
247 | {NULL, 0, 0} | |
248 | }; | |
249 | ||
250 | /* Used to track the size of an address when generating a pre/post | |
251 | increment address. */ | |
252 | static enum machine_mode aarch64_memory_reference_mode; | |
253 | ||
254 | /* Used to force GTY into this file. */ | |
255 | static GTY(()) int gty_dummy; | |
256 | ||
257 | /* A table of valid AArch64 "bitmask immediate" values for | |
258 | logical instructions. */ | |
259 | ||
260 | #define AARCH64_NUM_BITMASKS 5334 | |
261 | static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS]; | |
262 | ||
263 | /* Did we set flag_omit_frame_pointer just so | |
264 | aarch64_frame_pointer_required would be called? */ | |
265 | static bool faked_omit_frame_pointer; | |
266 | ||
267 | typedef enum aarch64_cond_code | |
268 | { | |
269 | AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL, | |
270 | AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT, | |
271 | AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV | |
272 | } | |
273 | aarch64_cc; | |
274 | ||
275 | #define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1)) | |
276 | ||
277 | /* The condition codes of the processor, and the inverse function. */ | |
278 | static const char * const aarch64_condition_codes[] = | |
279 | { | |
280 | "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", | |
281 | "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" | |
282 | }; | |
283 | ||
284 | /* Provide a mapping from gcc register numbers to dwarf register numbers. */ | |
285 | unsigned | |
286 | aarch64_dbx_register_number (unsigned regno) | |
287 | { | |
288 | if (GP_REGNUM_P (regno)) | |
289 | return AARCH64_DWARF_R0 + regno - R0_REGNUM; | |
290 | else if (regno == SP_REGNUM) | |
291 | return AARCH64_DWARF_SP; | |
292 | else if (FP_REGNUM_P (regno)) | |
293 | return AARCH64_DWARF_V0 + regno - V0_REGNUM; | |
294 | ||
295 | /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no | |
296 | equivalent DWARF register. */ | |
297 | return DWARF_FRAME_REGISTERS; | |
298 | } | |
299 | ||
300 | /* Return TRUE if MODE is any of the large INT modes. */ | |
301 | static bool | |
302 | aarch64_vect_struct_mode_p (enum machine_mode mode) | |
303 | { | |
304 | return mode == OImode || mode == CImode || mode == XImode; | |
305 | } | |
306 | ||
307 | /* Return TRUE if MODE is any of the vector modes. */ | |
308 | static bool | |
309 | aarch64_vector_mode_p (enum machine_mode mode) | |
310 | { | |
311 | return aarch64_vector_mode_supported_p (mode) | |
312 | || aarch64_vect_struct_mode_p (mode); | |
313 | } | |
314 | ||
315 | /* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */ | |
316 | static bool | |
317 | aarch64_array_mode_supported_p (enum machine_mode mode, | |
318 | unsigned HOST_WIDE_INT nelems) | |
319 | { | |
320 | if (TARGET_SIMD | |
321 | && AARCH64_VALID_SIMD_QREG_MODE (mode) | |
322 | && (nelems >= 2 && nelems <= 4)) | |
323 | return true; | |
324 | ||
325 | return false; | |
326 | } | |
327 | ||
328 | /* Implement HARD_REGNO_NREGS. */ | |
329 | ||
330 | int | |
331 | aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode) | |
332 | { | |
333 | switch (aarch64_regno_regclass (regno)) | |
334 | { | |
335 | case FP_REGS: | |
336 | case FP_LO_REGS: | |
337 | return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG; | |
338 | default: | |
339 | return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; | |
340 | } | |
341 | gcc_unreachable (); | |
342 | } | |
343 | ||
344 | /* Implement HARD_REGNO_MODE_OK. */ | |
345 | ||
346 | int | |
347 | aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode) | |
348 | { | |
349 | if (GET_MODE_CLASS (mode) == MODE_CC) | |
350 | return regno == CC_REGNUM; | |
351 | ||
352 | if (regno == SP_REGNUM || regno == FRAME_POINTER_REGNUM | |
353 | || regno == ARG_POINTER_REGNUM) | |
354 | return mode == Pmode; | |
355 | ||
356 | if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode)) | |
357 | return 1; | |
358 | ||
359 | if (FP_REGNUM_P (regno)) | |
360 | { | |
361 | if (aarch64_vect_struct_mode_p (mode)) | |
362 | return | |
363 | (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM; | |
364 | else | |
365 | return 1; | |
366 | } | |
367 | ||
368 | return 0; | |
369 | } | |
370 | ||
371 | /* Return true if calls to DECL should be treated as | |
372 | long-calls (ie called via a register). */ | |
373 | static bool | |
374 | aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED) | |
375 | { | |
376 | return false; | |
377 | } | |
378 | ||
379 | /* Return true if calls to symbol-ref SYM should be treated as | |
380 | long-calls (ie called via a register). */ | |
381 | bool | |
382 | aarch64_is_long_call_p (rtx sym) | |
383 | { | |
384 | return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym)); | |
385 | } | |
386 | ||
387 | /* Return true if the offsets to a zero/sign-extract operation | |
388 | represent an expression that matches an extend operation. The | |
389 | operands represent the paramters from | |
390 | ||
391 | (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */ | |
392 | bool | |
393 | aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm, | |
394 | rtx extract_imm) | |
395 | { | |
396 | HOST_WIDE_INT mult_val, extract_val; | |
397 | ||
398 | if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm)) | |
399 | return false; | |
400 | ||
401 | mult_val = INTVAL (mult_imm); | |
402 | extract_val = INTVAL (extract_imm); | |
403 | ||
404 | if (extract_val > 8 | |
405 | && extract_val < GET_MODE_BITSIZE (mode) | |
406 | && exact_log2 (extract_val & ~7) > 0 | |
407 | && (extract_val & 7) <= 4 | |
408 | && mult_val == (1 << (extract_val & 7))) | |
409 | return true; | |
410 | ||
411 | return false; | |
412 | } | |
413 | ||
414 | /* Emit an insn that's a simple single-set. Both the operands must be | |
415 | known to be valid. */ | |
416 | inline static rtx | |
417 | emit_set_insn (rtx x, rtx y) | |
418 | { | |
419 | return emit_insn (gen_rtx_SET (VOIDmode, x, y)); | |
420 | } | |
421 | ||
422 | /* X and Y are two things to compare using CODE. Emit the compare insn and | |
423 | return the rtx for register 0 in the proper mode. */ | |
424 | rtx | |
425 | aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y) | |
426 | { | |
427 | enum machine_mode mode = SELECT_CC_MODE (code, x, y); | |
428 | rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM); | |
429 | ||
430 | emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y)); | |
431 | return cc_reg; | |
432 | } | |
433 | ||
434 | /* Build the SYMBOL_REF for __tls_get_addr. */ | |
435 | ||
436 | static GTY(()) rtx tls_get_addr_libfunc; | |
437 | ||
438 | rtx | |
439 | aarch64_tls_get_addr (void) | |
440 | { | |
441 | if (!tls_get_addr_libfunc) | |
442 | tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr"); | |
443 | return tls_get_addr_libfunc; | |
444 | } | |
445 | ||
446 | /* Return the TLS model to use for ADDR. */ | |
447 | ||
448 | static enum tls_model | |
449 | tls_symbolic_operand_type (rtx addr) | |
450 | { | |
451 | enum tls_model tls_kind = TLS_MODEL_NONE; | |
452 | rtx sym, addend; | |
453 | ||
454 | if (GET_CODE (addr) == CONST) | |
455 | { | |
456 | split_const (addr, &sym, &addend); | |
457 | if (GET_CODE (sym) == SYMBOL_REF) | |
458 | tls_kind = SYMBOL_REF_TLS_MODEL (sym); | |
459 | } | |
460 | else if (GET_CODE (addr) == SYMBOL_REF) | |
461 | tls_kind = SYMBOL_REF_TLS_MODEL (addr); | |
462 | ||
463 | return tls_kind; | |
464 | } | |
465 | ||
466 | /* We'll allow lo_sum's in addresses in our legitimate addresses | |
467 | so that combine would take care of combining addresses where | |
468 | necessary, but for generation purposes, we'll generate the address | |
469 | as : | |
470 | RTL Absolute | |
471 | tmp = hi (symbol_ref); adrp x1, foo | |
472 | dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo | |
473 | nop | |
474 | ||
475 | PIC TLS | |
476 | adrp x1, :got:foo adrp tmp, :tlsgd:foo | |
477 | ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo | |
478 | bl __tls_get_addr | |
479 | nop | |
480 | ||
481 | Load TLS symbol, depending on TLS mechanism and TLS access model. | |
482 | ||
483 | Global Dynamic - Traditional TLS: | |
484 | adrp tmp, :tlsgd:imm | |
485 | add dest, tmp, #:tlsgd_lo12:imm | |
486 | bl __tls_get_addr | |
487 | ||
488 | Global Dynamic - TLS Descriptors: | |
489 | adrp dest, :tlsdesc:imm | |
490 | ldr tmp, [dest, #:tlsdesc_lo12:imm] | |
491 | add dest, dest, #:tlsdesc_lo12:imm | |
492 | blr tmp | |
493 | mrs tp, tpidr_el0 | |
494 | add dest, dest, tp | |
495 | ||
496 | Initial Exec: | |
497 | mrs tp, tpidr_el0 | |
498 | adrp tmp, :gottprel:imm | |
499 | ldr dest, [tmp, #:gottprel_lo12:imm] | |
500 | add dest, dest, tp | |
501 | ||
502 | Local Exec: | |
503 | mrs tp, tpidr_el0 | |
504 | add t0, tp, #:tprel_hi12:imm | |
505 | add t0, #:tprel_lo12_nc:imm | |
506 | */ | |
507 | ||
508 | static void | |
509 | aarch64_load_symref_appropriately (rtx dest, rtx imm, | |
510 | enum aarch64_symbol_type type) | |
511 | { | |
512 | switch (type) | |
513 | { | |
514 | case SYMBOL_SMALL_ABSOLUTE: | |
515 | { | |
516 | rtx tmp_reg = dest; | |
517 | if (can_create_pseudo_p ()) | |
518 | { | |
519 | tmp_reg = gen_reg_rtx (Pmode); | |
520 | } | |
521 | ||
522 | emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm)); | |
523 | emit_insn (gen_add_losym (dest, tmp_reg, imm)); | |
524 | return; | |
525 | } | |
526 | ||
527 | case SYMBOL_SMALL_GOT: | |
528 | { | |
529 | rtx tmp_reg = dest; | |
530 | if (can_create_pseudo_p ()) | |
531 | { | |
532 | tmp_reg = gen_reg_rtx (Pmode); | |
533 | } | |
534 | emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm)); | |
535 | emit_insn (gen_ldr_got_small (dest, tmp_reg, imm)); | |
536 | return; | |
537 | } | |
538 | ||
539 | case SYMBOL_SMALL_TLSGD: | |
540 | { | |
541 | rtx insns; | |
542 | rtx result = gen_rtx_REG (Pmode, R0_REGNUM); | |
543 | ||
544 | start_sequence (); | |
545 | emit_call_insn (gen_tlsgd_small (result, imm)); | |
546 | insns = get_insns (); | |
547 | end_sequence (); | |
548 | ||
549 | RTL_CONST_CALL_P (insns) = 1; | |
550 | emit_libcall_block (insns, dest, result, imm); | |
551 | return; | |
552 | } | |
553 | ||
554 | case SYMBOL_SMALL_TLSDESC: | |
555 | { | |
556 | rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM); | |
557 | rtx tp; | |
558 | ||
559 | emit_insn (gen_tlsdesc_small (imm)); | |
560 | tp = aarch64_load_tp (NULL); | |
561 | emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0))); | |
562 | set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); | |
563 | return; | |
564 | } | |
565 | ||
566 | case SYMBOL_SMALL_GOTTPREL: | |
567 | { | |
568 | rtx tmp_reg = gen_reg_rtx (Pmode); | |
569 | rtx tp = aarch64_load_tp (NULL); | |
570 | emit_insn (gen_tlsie_small (tmp_reg, imm)); | |
571 | emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg))); | |
572 | set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); | |
573 | return; | |
574 | } | |
575 | ||
576 | case SYMBOL_SMALL_TPREL: | |
577 | { | |
578 | rtx tp = aarch64_load_tp (NULL); | |
579 | emit_insn (gen_tlsle_small (dest, tp, imm)); | |
580 | set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); | |
581 | return; | |
582 | } | |
583 | ||
584 | default: | |
585 | gcc_unreachable (); | |
586 | } | |
587 | } | |
588 | ||
589 | /* Emit a move from SRC to DEST. Assume that the move expanders can | |
590 | handle all moves if !can_create_pseudo_p (). The distinction is | |
591 | important because, unlike emit_move_insn, the move expanders know | |
592 | how to force Pmode objects into the constant pool even when the | |
593 | constant pool address is not itself legitimate. */ | |
594 | static rtx | |
595 | aarch64_emit_move (rtx dest, rtx src) | |
596 | { | |
597 | return (can_create_pseudo_p () | |
598 | ? emit_move_insn (dest, src) | |
599 | : emit_move_insn_1 (dest, src)); | |
600 | } | |
601 | ||
602 | void | |
603 | aarch64_split_128bit_move (rtx dst, rtx src) | |
604 | { | |
605 | rtx low_dst; | |
606 | ||
607 | gcc_assert (GET_MODE (dst) == TImode); | |
608 | ||
609 | if (REG_P (dst) && REG_P (src)) | |
610 | { | |
611 | int src_regno = REGNO (src); | |
612 | int dst_regno = REGNO (dst); | |
613 | ||
614 | gcc_assert (GET_MODE (src) == TImode); | |
615 | ||
616 | /* Handle r -> w, w -> r. */ | |
617 | if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno)) | |
618 | { | |
619 | emit_insn (gen_aarch64_movtilow_di (dst, | |
620 | gen_lowpart (word_mode, src))); | |
621 | emit_insn (gen_aarch64_movtihigh_di (dst, | |
622 | gen_highpart (word_mode, src))); | |
623 | return; | |
624 | } | |
625 | else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno)) | |
626 | { | |
627 | emit_insn (gen_aarch64_movdi_tilow (gen_lowpart (word_mode, dst), | |
628 | src)); | |
629 | emit_insn (gen_aarch64_movdi_tihigh (gen_highpart (word_mode, dst), | |
630 | src)); | |
631 | return; | |
632 | } | |
633 | /* Fall through to r -> r cases. */ | |
634 | } | |
635 | ||
636 | low_dst = gen_lowpart (word_mode, dst); | |
637 | if (REG_P (low_dst) | |
638 | && reg_overlap_mentioned_p (low_dst, src)) | |
639 | { | |
640 | aarch64_emit_move (gen_highpart (word_mode, dst), | |
641 | gen_highpart_mode (word_mode, TImode, src)); | |
642 | aarch64_emit_move (low_dst, gen_lowpart (word_mode, src)); | |
643 | } | |
644 | else | |
645 | { | |
646 | aarch64_emit_move (low_dst, gen_lowpart (word_mode, src)); | |
647 | aarch64_emit_move (gen_highpart (word_mode, dst), | |
648 | gen_highpart_mode (word_mode, TImode, src)); | |
649 | } | |
650 | } | |
651 | ||
652 | bool | |
653 | aarch64_split_128bit_move_p (rtx dst, rtx src) | |
654 | { | |
655 | return (! REG_P (src) | |
656 | || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src)))); | |
657 | } | |
658 | ||
659 | static rtx | |
660 | aarch64_force_temporary (rtx x, rtx value) | |
661 | { | |
662 | if (can_create_pseudo_p ()) | |
663 | return force_reg (Pmode, value); | |
664 | else | |
665 | { | |
666 | x = aarch64_emit_move (x, value); | |
667 | return x; | |
668 | } | |
669 | } | |
670 | ||
671 | ||
672 | static rtx | |
673 | aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset) | |
674 | { | |
675 | if (!aarch64_plus_immediate (GEN_INT (offset), DImode)) | |
676 | { | |
677 | rtx high; | |
678 | /* Load the full offset into a register. This | |
679 | might be improvable in the future. */ | |
680 | high = GEN_INT (offset); | |
681 | offset = 0; | |
682 | high = aarch64_force_temporary (temp, high); | |
683 | reg = aarch64_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg)); | |
684 | } | |
685 | return plus_constant (mode, reg, offset); | |
686 | } | |
687 | ||
688 | void | |
689 | aarch64_expand_mov_immediate (rtx dest, rtx imm) | |
690 | { | |
691 | enum machine_mode mode = GET_MODE (dest); | |
692 | unsigned HOST_WIDE_INT mask; | |
693 | int i; | |
694 | bool first; | |
695 | unsigned HOST_WIDE_INT val; | |
696 | bool subtargets; | |
697 | rtx subtarget; | |
698 | int one_match, zero_match; | |
699 | ||
700 | gcc_assert (mode == SImode || mode == DImode); | |
701 | ||
702 | /* Check on what type of symbol it is. */ | |
703 | if (GET_CODE (imm) == SYMBOL_REF | |
704 | || GET_CODE (imm) == LABEL_REF | |
705 | || GET_CODE (imm) == CONST) | |
706 | { | |
707 | rtx mem, base, offset; | |
708 | enum aarch64_symbol_type sty; | |
709 | ||
710 | /* If we have (const (plus symbol offset)), separate out the offset | |
711 | before we start classifying the symbol. */ | |
712 | split_const (imm, &base, &offset); | |
713 | ||
714 | sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR); | |
715 | switch (sty) | |
716 | { | |
717 | case SYMBOL_FORCE_TO_MEM: | |
718 | if (offset != const0_rtx | |
719 | && targetm.cannot_force_const_mem (mode, imm)) | |
720 | { | |
721 | gcc_assert(can_create_pseudo_p ()); | |
722 | base = aarch64_force_temporary (dest, base); | |
723 | base = aarch64_add_offset (mode, NULL, base, INTVAL (offset)); | |
724 | aarch64_emit_move (dest, base); | |
725 | return; | |
726 | } | |
727 | mem = force_const_mem (mode, imm); | |
728 | gcc_assert (mem); | |
729 | emit_insn (gen_rtx_SET (VOIDmode, dest, mem)); | |
730 | return; | |
731 | ||
732 | case SYMBOL_SMALL_TLSGD: | |
733 | case SYMBOL_SMALL_TLSDESC: | |
734 | case SYMBOL_SMALL_GOTTPREL: | |
735 | case SYMBOL_SMALL_GOT: | |
736 | if (offset != const0_rtx) | |
737 | { | |
738 | gcc_assert(can_create_pseudo_p ()); | |
739 | base = aarch64_force_temporary (dest, base); | |
740 | base = aarch64_add_offset (mode, NULL, base, INTVAL (offset)); | |
741 | aarch64_emit_move (dest, base); | |
742 | return; | |
743 | } | |
744 | /* FALLTHRU */ | |
745 | ||
746 | case SYMBOL_SMALL_TPREL: | |
747 | case SYMBOL_SMALL_ABSOLUTE: | |
748 | aarch64_load_symref_appropriately (dest, imm, sty); | |
749 | return; | |
750 | ||
751 | default: | |
752 | gcc_unreachable (); | |
753 | } | |
754 | } | |
755 | ||
756 | if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode)) | |
757 | { | |
758 | emit_insn (gen_rtx_SET (VOIDmode, dest, imm)); | |
759 | return; | |
760 | } | |
761 | ||
762 | if (!CONST_INT_P (imm)) | |
763 | { | |
764 | if (GET_CODE (imm) == HIGH) | |
765 | emit_insn (gen_rtx_SET (VOIDmode, dest, imm)); | |
766 | else | |
767 | { | |
768 | rtx mem = force_const_mem (mode, imm); | |
769 | gcc_assert (mem); | |
770 | emit_insn (gen_rtx_SET (VOIDmode, dest, mem)); | |
771 | } | |
772 | ||
773 | return; | |
774 | } | |
775 | ||
776 | if (mode == SImode) | |
777 | { | |
778 | /* We know we can't do this in 1 insn, and we must be able to do it | |
779 | in two; so don't mess around looking for sequences that don't buy | |
780 | us anything. */ | |
781 | emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff))); | |
782 | emit_insn (gen_insv_immsi (dest, GEN_INT (16), | |
783 | GEN_INT ((INTVAL (imm) >> 16) & 0xffff))); | |
784 | return; | |
785 | } | |
786 | ||
787 | /* Remaining cases are all for DImode. */ | |
788 | ||
789 | val = INTVAL (imm); | |
790 | subtargets = optimize && can_create_pseudo_p (); | |
791 | ||
792 | one_match = 0; | |
793 | zero_match = 0; | |
794 | mask = 0xffff; | |
795 | ||
796 | for (i = 0; i < 64; i += 16, mask <<= 16) | |
797 | { | |
798 | if ((val & mask) == 0) | |
799 | zero_match++; | |
800 | else if ((val & mask) == mask) | |
801 | one_match++; | |
802 | } | |
803 | ||
804 | if (one_match == 2) | |
805 | { | |
806 | mask = 0xffff; | |
807 | for (i = 0; i < 64; i += 16, mask <<= 16) | |
808 | { | |
809 | if ((val & mask) != mask) | |
810 | { | |
811 | emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask))); | |
812 | emit_insn (gen_insv_immdi (dest, GEN_INT (i), | |
813 | GEN_INT ((val >> i) & 0xffff))); | |
814 | return; | |
815 | } | |
816 | } | |
817 | gcc_unreachable (); | |
818 | } | |
819 | ||
820 | if (zero_match == 2) | |
821 | goto simple_sequence; | |
822 | ||
823 | mask = 0x0ffff0000UL; | |
824 | for (i = 16; i < 64; i += 16, mask <<= 16) | |
825 | { | |
826 | HOST_WIDE_INT comp = mask & ~(mask - 1); | |
827 | ||
828 | if (aarch64_uimm12_shift (val - (val & mask))) | |
829 | { | |
830 | subtarget = subtargets ? gen_reg_rtx (DImode) : dest; | |
831 | ||
832 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask))); | |
833 | emit_insn (gen_adddi3 (dest, subtarget, | |
834 | GEN_INT (val - (val & mask)))); | |
835 | return; | |
836 | } | |
837 | else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask)))) | |
838 | { | |
839 | subtarget = subtargets ? gen_reg_rtx (DImode) : dest; | |
840 | ||
841 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
842 | GEN_INT ((val + comp) & mask))); | |
843 | emit_insn (gen_adddi3 (dest, subtarget, | |
844 | GEN_INT (val - ((val + comp) & mask)))); | |
845 | return; | |
846 | } | |
847 | else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask))) | |
848 | { | |
849 | subtarget = subtargets ? gen_reg_rtx (DImode) : dest; | |
850 | ||
851 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
852 | GEN_INT ((val - comp) | ~mask))); | |
853 | emit_insn (gen_adddi3 (dest, subtarget, | |
854 | GEN_INT (val - ((val - comp) | ~mask)))); | |
855 | return; | |
856 | } | |
857 | else if (aarch64_uimm12_shift (-(val - (val | ~mask)))) | |
858 | { | |
859 | subtarget = subtargets ? gen_reg_rtx (DImode) : dest; | |
860 | ||
861 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
862 | GEN_INT (val | ~mask))); | |
863 | emit_insn (gen_adddi3 (dest, subtarget, | |
864 | GEN_INT (val - (val | ~mask)))); | |
865 | return; | |
866 | } | |
867 | } | |
868 | ||
869 | /* See if we can do it by arithmetically combining two | |
870 | immediates. */ | |
871 | for (i = 0; i < AARCH64_NUM_BITMASKS; i++) | |
872 | { | |
873 | int j; | |
874 | mask = 0xffff; | |
875 | ||
876 | if (aarch64_uimm12_shift (val - aarch64_bitmasks[i]) | |
877 | || aarch64_uimm12_shift (-val + aarch64_bitmasks[i])) | |
878 | { | |
879 | subtarget = subtargets ? gen_reg_rtx (DImode) : dest; | |
880 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
881 | GEN_INT (aarch64_bitmasks[i]))); | |
882 | emit_insn (gen_adddi3 (dest, subtarget, | |
883 | GEN_INT (val - aarch64_bitmasks[i]))); | |
884 | return; | |
885 | } | |
886 | ||
887 | for (j = 0; j < 64; j += 16, mask <<= 16) | |
888 | { | |
889 | if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask)) | |
890 | { | |
891 | emit_insn (gen_rtx_SET (VOIDmode, dest, | |
892 | GEN_INT (aarch64_bitmasks[i]))); | |
893 | emit_insn (gen_insv_immdi (dest, GEN_INT (j), | |
894 | GEN_INT ((val >> j) & 0xffff))); | |
895 | return; | |
896 | } | |
897 | } | |
898 | } | |
899 | ||
900 | /* See if we can do it by logically combining two immediates. */ | |
901 | for (i = 0; i < AARCH64_NUM_BITMASKS; i++) | |
902 | { | |
903 | if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i]) | |
904 | { | |
905 | int j; | |
906 | ||
907 | for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++) | |
908 | if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j])) | |
909 | { | |
910 | subtarget = subtargets ? gen_reg_rtx (mode) : dest; | |
911 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
912 | GEN_INT (aarch64_bitmasks[i]))); | |
913 | emit_insn (gen_iordi3 (dest, subtarget, | |
914 | GEN_INT (aarch64_bitmasks[j]))); | |
915 | return; | |
916 | } | |
917 | } | |
918 | else if ((val & aarch64_bitmasks[i]) == val) | |
919 | { | |
920 | int j; | |
921 | ||
922 | for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++) | |
923 | if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i])) | |
924 | { | |
925 | ||
926 | subtarget = subtargets ? gen_reg_rtx (mode) : dest; | |
927 | emit_insn (gen_rtx_SET (VOIDmode, subtarget, | |
928 | GEN_INT (aarch64_bitmasks[j]))); | |
929 | emit_insn (gen_anddi3 (dest, subtarget, | |
930 | GEN_INT (aarch64_bitmasks[i]))); | |
931 | return; | |
932 | } | |
933 | } | |
934 | } | |
935 | ||
936 | simple_sequence: | |
937 | first = true; | |
938 | mask = 0xffff; | |
939 | for (i = 0; i < 64; i += 16, mask <<= 16) | |
940 | { | |
941 | if ((val & mask) != 0) | |
942 | { | |
943 | if (first) | |
944 | { | |
945 | emit_insn (gen_rtx_SET (VOIDmode, dest, | |
946 | GEN_INT (val & mask))); | |
947 | first = false; | |
948 | } | |
949 | else | |
950 | emit_insn (gen_insv_immdi (dest, GEN_INT (i), | |
951 | GEN_INT ((val >> i) & 0xffff))); | |
952 | } | |
953 | } | |
954 | } | |
955 | ||
956 | static bool | |
957 | aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) | |
958 | { | |
959 | /* Indirect calls are not currently supported. */ | |
960 | if (decl == NULL) | |
961 | return false; | |
962 | ||
963 | /* Cannot tail-call to long-calls, since these are outside of the | |
964 | range of a branch instruction (we could handle this if we added | |
965 | support for indirect tail-calls. */ | |
966 | if (aarch64_decl_is_long_call_p (decl)) | |
967 | return false; | |
968 | ||
969 | return true; | |
970 | } | |
971 | ||
972 | /* Implement TARGET_PASS_BY_REFERENCE. */ | |
973 | ||
974 | static bool | |
975 | aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED, | |
976 | enum machine_mode mode, | |
977 | const_tree type, | |
978 | bool named ATTRIBUTE_UNUSED) | |
979 | { | |
980 | HOST_WIDE_INT size; | |
981 | enum machine_mode dummymode; | |
982 | int nregs; | |
983 | ||
984 | /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */ | |
985 | size = (mode == BLKmode && type) | |
986 | ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); | |
987 | ||
988 | if (type) | |
989 | { | |
990 | /* Arrays always passed by reference. */ | |
991 | if (TREE_CODE (type) == ARRAY_TYPE) | |
992 | return true; | |
993 | /* Other aggregates based on their size. */ | |
994 | if (AGGREGATE_TYPE_P (type)) | |
995 | size = int_size_in_bytes (type); | |
996 | } | |
997 | ||
998 | /* Variable sized arguments are always returned by reference. */ | |
999 | if (size < 0) | |
1000 | return true; | |
1001 | ||
1002 | /* Can this be a candidate to be passed in fp/simd register(s)? */ | |
1003 | if (aarch64_vfp_is_call_or_return_candidate (mode, type, | |
1004 | &dummymode, &nregs, | |
1005 | NULL)) | |
1006 | return false; | |
1007 | ||
1008 | /* Arguments which are variable sized or larger than 2 registers are | |
1009 | passed by reference unless they are a homogenous floating point | |
1010 | aggregate. */ | |
1011 | return size > 2 * UNITS_PER_WORD; | |
1012 | } | |
1013 | ||
1014 | /* Return TRUE if VALTYPE is padded to its least significant bits. */ | |
1015 | static bool | |
1016 | aarch64_return_in_msb (const_tree valtype) | |
1017 | { | |
1018 | enum machine_mode dummy_mode; | |
1019 | int dummy_int; | |
1020 | ||
1021 | /* Never happens in little-endian mode. */ | |
1022 | if (!BYTES_BIG_ENDIAN) | |
1023 | return false; | |
1024 | ||
1025 | /* Only composite types smaller than or equal to 16 bytes can | |
1026 | be potentially returned in registers. */ | |
1027 | if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype)) | |
1028 | || int_size_in_bytes (valtype) <= 0 | |
1029 | || int_size_in_bytes (valtype) > 16) | |
1030 | return false; | |
1031 | ||
1032 | /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate) | |
1033 | or an HVA (Homogeneous Short-Vector Aggregate); such a special composite | |
1034 | is always passed/returned in the least significant bits of fp/simd | |
1035 | register(s). */ | |
1036 | if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype, | |
1037 | &dummy_mode, &dummy_int, NULL)) | |
1038 | return false; | |
1039 | ||
1040 | return true; | |
1041 | } | |
1042 | ||
1043 | /* Implement TARGET_FUNCTION_VALUE. | |
1044 | Define how to find the value returned by a function. */ | |
1045 | ||
1046 | static rtx | |
1047 | aarch64_function_value (const_tree type, const_tree func, | |
1048 | bool outgoing ATTRIBUTE_UNUSED) | |
1049 | { | |
1050 | enum machine_mode mode; | |
1051 | int unsignedp; | |
1052 | int count; | |
1053 | enum machine_mode ag_mode; | |
1054 | ||
1055 | mode = TYPE_MODE (type); | |
1056 | if (INTEGRAL_TYPE_P (type)) | |
1057 | mode = promote_function_mode (type, mode, &unsignedp, func, 1); | |
1058 | ||
1059 | if (aarch64_return_in_msb (type)) | |
1060 | { | |
1061 | HOST_WIDE_INT size = int_size_in_bytes (type); | |
1062 | ||
1063 | if (size % UNITS_PER_WORD != 0) | |
1064 | { | |
1065 | size += UNITS_PER_WORD - size % UNITS_PER_WORD; | |
1066 | mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); | |
1067 | } | |
1068 | } | |
1069 | ||
1070 | if (aarch64_vfp_is_call_or_return_candidate (mode, type, | |
1071 | &ag_mode, &count, NULL)) | |
1072 | { | |
1073 | if (!aarch64_composite_type_p (type, mode)) | |
1074 | { | |
1075 | gcc_assert (count == 1 && mode == ag_mode); | |
1076 | return gen_rtx_REG (mode, V0_REGNUM); | |
1077 | } | |
1078 | else | |
1079 | { | |
1080 | int i; | |
1081 | rtx par; | |
1082 | ||
1083 | par = gen_rtx_PARALLEL (mode, rtvec_alloc (count)); | |
1084 | for (i = 0; i < count; i++) | |
1085 | { | |
1086 | rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i); | |
1087 | tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, | |
1088 | GEN_INT (i * GET_MODE_SIZE (ag_mode))); | |
1089 | XVECEXP (par, 0, i) = tmp; | |
1090 | } | |
1091 | return par; | |
1092 | } | |
1093 | } | |
1094 | else | |
1095 | return gen_rtx_REG (mode, R0_REGNUM); | |
1096 | } | |
1097 | ||
1098 | /* Implements TARGET_FUNCTION_VALUE_REGNO_P. | |
1099 | Return true if REGNO is the number of a hard register in which the values | |
1100 | of called function may come back. */ | |
1101 | ||
1102 | static bool | |
1103 | aarch64_function_value_regno_p (const unsigned int regno) | |
1104 | { | |
1105 | /* Maximum of 16 bytes can be returned in the general registers. Examples | |
1106 | of 16-byte return values are: 128-bit integers and 16-byte small | |
1107 | structures (excluding homogeneous floating-point aggregates). */ | |
1108 | if (regno == R0_REGNUM || regno == R1_REGNUM) | |
1109 | return true; | |
1110 | ||
1111 | /* Up to four fp/simd registers can return a function value, e.g. a | |
1112 | homogeneous floating-point aggregate having four members. */ | |
1113 | if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS) | |
1114 | return !TARGET_GENERAL_REGS_ONLY; | |
1115 | ||
1116 | return false; | |
1117 | } | |
1118 | ||
1119 | /* Implement TARGET_RETURN_IN_MEMORY. | |
1120 | ||
1121 | If the type T of the result of a function is such that | |
1122 | void func (T arg) | |
1123 | would require that arg be passed as a value in a register (or set of | |
1124 | registers) according to the parameter passing rules, then the result | |
1125 | is returned in the same registers as would be used for such an | |
1126 | argument. */ | |
1127 | ||
1128 | static bool | |
1129 | aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) | |
1130 | { | |
1131 | HOST_WIDE_INT size; | |
1132 | enum machine_mode ag_mode; | |
1133 | int count; | |
1134 | ||
1135 | if (!AGGREGATE_TYPE_P (type) | |
1136 | && TREE_CODE (type) != COMPLEX_TYPE | |
1137 | && TREE_CODE (type) != VECTOR_TYPE) | |
1138 | /* Simple scalar types always returned in registers. */ | |
1139 | return false; | |
1140 | ||
1141 | if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type), | |
1142 | type, | |
1143 | &ag_mode, | |
1144 | &count, | |
1145 | NULL)) | |
1146 | return false; | |
1147 | ||
1148 | /* Types larger than 2 registers returned in memory. */ | |
1149 | size = int_size_in_bytes (type); | |
1150 | return (size < 0 || size > 2 * UNITS_PER_WORD); | |
1151 | } | |
1152 | ||
1153 | static bool | |
1154 | aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode, | |
1155 | const_tree type, int *nregs) | |
1156 | { | |
1157 | CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); | |
1158 | return aarch64_vfp_is_call_or_return_candidate (mode, | |
1159 | type, | |
1160 | &pcum->aapcs_vfp_rmode, | |
1161 | nregs, | |
1162 | NULL); | |
1163 | } | |
1164 | ||
1165 | /* Given MODE and TYPE of a function argument, return the alignment in | |
1166 | bits. The idea is to suppress any stronger alignment requested by | |
1167 | the user and opt for the natural alignment (specified in AAPCS64 \S 4.1). | |
1168 | This is a helper function for local use only. */ | |
1169 | ||
1170 | static unsigned int | |
1171 | aarch64_function_arg_alignment (enum machine_mode mode, const_tree type) | |
1172 | { | |
1173 | unsigned int alignment; | |
1174 | ||
1175 | if (type) | |
1176 | { | |
1177 | if (!integer_zerop (TYPE_SIZE (type))) | |
1178 | { | |
1179 | if (TYPE_MODE (type) == mode) | |
1180 | alignment = TYPE_ALIGN (type); | |
1181 | else | |
1182 | alignment = GET_MODE_ALIGNMENT (mode); | |
1183 | } | |
1184 | else | |
1185 | alignment = 0; | |
1186 | } | |
1187 | else | |
1188 | alignment = GET_MODE_ALIGNMENT (mode); | |
1189 | ||
1190 | return alignment; | |
1191 | } | |
1192 | ||
1193 | /* Layout a function argument according to the AAPCS64 rules. The rule | |
1194 | numbers refer to the rule numbers in the AAPCS64. */ | |
1195 | ||
1196 | static void | |
1197 | aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode, | |
1198 | const_tree type, | |
1199 | bool named ATTRIBUTE_UNUSED) | |
1200 | { | |
1201 | CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); | |
1202 | int ncrn, nvrn, nregs; | |
1203 | bool allocate_ncrn, allocate_nvrn; | |
1204 | ||
1205 | /* We need to do this once per argument. */ | |
1206 | if (pcum->aapcs_arg_processed) | |
1207 | return; | |
1208 | ||
1209 | pcum->aapcs_arg_processed = true; | |
1210 | ||
1211 | allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode); | |
1212 | allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v, | |
1213 | mode, | |
1214 | type, | |
1215 | &nregs); | |
1216 | ||
1217 | /* allocate_ncrn may be false-positive, but allocate_nvrn is quite reliable. | |
1218 | The following code thus handles passing by SIMD/FP registers first. */ | |
1219 | ||
1220 | nvrn = pcum->aapcs_nvrn; | |
1221 | ||
1222 | /* C1 - C5 for floating point, homogenous floating point aggregates (HFA) | |
1223 | and homogenous short-vector aggregates (HVA). */ | |
1224 | if (allocate_nvrn) | |
1225 | { | |
1226 | if (nvrn + nregs <= NUM_FP_ARG_REGS) | |
1227 | { | |
1228 | pcum->aapcs_nextnvrn = nvrn + nregs; | |
1229 | if (!aarch64_composite_type_p (type, mode)) | |
1230 | { | |
1231 | gcc_assert (nregs == 1); | |
1232 | pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn); | |
1233 | } | |
1234 | else | |
1235 | { | |
1236 | rtx par; | |
1237 | int i; | |
1238 | par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs)); | |
1239 | for (i = 0; i < nregs; i++) | |
1240 | { | |
1241 | rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode, | |
1242 | V0_REGNUM + nvrn + i); | |
1243 | tmp = gen_rtx_EXPR_LIST | |
1244 | (VOIDmode, tmp, | |
1245 | GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode))); | |
1246 | XVECEXP (par, 0, i) = tmp; | |
1247 | } | |
1248 | pcum->aapcs_reg = par; | |
1249 | } | |
1250 | return; | |
1251 | } | |
1252 | else | |
1253 | { | |
1254 | /* C.3 NSRN is set to 8. */ | |
1255 | pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS; | |
1256 | goto on_stack; | |
1257 | } | |
1258 | } | |
1259 | ||
1260 | ncrn = pcum->aapcs_ncrn; | |
1261 | nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)) | |
1262 | + UNITS_PER_WORD - 1) / UNITS_PER_WORD; | |
1263 | ||
1264 | ||
1265 | /* C6 - C9. though the sign and zero extension semantics are | |
1266 | handled elsewhere. This is the case where the argument fits | |
1267 | entirely general registers. */ | |
1268 | if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS)) | |
1269 | { | |
1270 | unsigned int alignment = aarch64_function_arg_alignment (mode, type); | |
1271 | ||
1272 | gcc_assert (nregs == 0 || nregs == 1 || nregs == 2); | |
1273 | ||
1274 | /* C.8 if the argument has an alignment of 16 then the NGRN is | |
1275 | rounded up to the next even number. */ | |
1276 | if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2) | |
1277 | { | |
1278 | ++ncrn; | |
1279 | gcc_assert (ncrn + nregs <= NUM_ARG_REGS); | |
1280 | } | |
1281 | /* NREGS can be 0 when e.g. an empty structure is to be passed. | |
1282 | A reg is still generated for it, but the caller should be smart | |
1283 | enough not to use it. */ | |
1284 | if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT) | |
1285 | { | |
1286 | pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn); | |
1287 | } | |
1288 | else | |
1289 | { | |
1290 | rtx par; | |
1291 | int i; | |
1292 | ||
1293 | par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs)); | |
1294 | for (i = 0; i < nregs; i++) | |
1295 | { | |
1296 | rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i); | |
1297 | tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, | |
1298 | GEN_INT (i * UNITS_PER_WORD)); | |
1299 | XVECEXP (par, 0, i) = tmp; | |
1300 | } | |
1301 | pcum->aapcs_reg = par; | |
1302 | } | |
1303 | ||
1304 | pcum->aapcs_nextncrn = ncrn + nregs; | |
1305 | return; | |
1306 | } | |
1307 | ||
1308 | /* C.11 */ | |
1309 | pcum->aapcs_nextncrn = NUM_ARG_REGS; | |
1310 | ||
1311 | /* The argument is passed on stack; record the needed number of words for | |
1312 | this argument (we can re-use NREGS) and align the total size if | |
1313 | necessary. */ | |
1314 | on_stack: | |
1315 | pcum->aapcs_stack_words = nregs; | |
1316 | if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT) | |
1317 | pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size, | |
1318 | 16 / UNITS_PER_WORD) + 1; | |
1319 | return; | |
1320 | } | |
1321 | ||
1322 | /* Implement TARGET_FUNCTION_ARG. */ | |
1323 | ||
1324 | static rtx | |
1325 | aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode, | |
1326 | const_tree type, bool named) | |
1327 | { | |
1328 | CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); | |
1329 | gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64); | |
1330 | ||
1331 | if (mode == VOIDmode) | |
1332 | return NULL_RTX; | |
1333 | ||
1334 | aarch64_layout_arg (pcum_v, mode, type, named); | |
1335 | return pcum->aapcs_reg; | |
1336 | } | |
1337 | ||
1338 | void | |
1339 | aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum, | |
1340 | const_tree fntype ATTRIBUTE_UNUSED, | |
1341 | rtx libname ATTRIBUTE_UNUSED, | |
1342 | const_tree fndecl ATTRIBUTE_UNUSED, | |
1343 | unsigned n_named ATTRIBUTE_UNUSED) | |
1344 | { | |
1345 | pcum->aapcs_ncrn = 0; | |
1346 | pcum->aapcs_nvrn = 0; | |
1347 | pcum->aapcs_nextncrn = 0; | |
1348 | pcum->aapcs_nextnvrn = 0; | |
1349 | pcum->pcs_variant = ARM_PCS_AAPCS64; | |
1350 | pcum->aapcs_reg = NULL_RTX; | |
1351 | pcum->aapcs_arg_processed = false; | |
1352 | pcum->aapcs_stack_words = 0; | |
1353 | pcum->aapcs_stack_size = 0; | |
1354 | ||
1355 | return; | |
1356 | } | |
1357 | ||
1358 | static void | |
1359 | aarch64_function_arg_advance (cumulative_args_t pcum_v, | |
1360 | enum machine_mode mode, | |
1361 | const_tree type, | |
1362 | bool named) | |
1363 | { | |
1364 | CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); | |
1365 | if (pcum->pcs_variant == ARM_PCS_AAPCS64) | |
1366 | { | |
1367 | aarch64_layout_arg (pcum_v, mode, type, named); | |
1368 | gcc_assert ((pcum->aapcs_reg != NULL_RTX) | |
1369 | != (pcum->aapcs_stack_words != 0)); | |
1370 | pcum->aapcs_arg_processed = false; | |
1371 | pcum->aapcs_ncrn = pcum->aapcs_nextncrn; | |
1372 | pcum->aapcs_nvrn = pcum->aapcs_nextnvrn; | |
1373 | pcum->aapcs_stack_size += pcum->aapcs_stack_words; | |
1374 | pcum->aapcs_stack_words = 0; | |
1375 | pcum->aapcs_reg = NULL_RTX; | |
1376 | } | |
1377 | } | |
1378 | ||
1379 | bool | |
1380 | aarch64_function_arg_regno_p (unsigned regno) | |
1381 | { | |
1382 | return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS) | |
1383 | || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS)); | |
1384 | } | |
1385 | ||
1386 | /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least | |
1387 | PARM_BOUNDARY bits of alignment, but will be given anything up | |
1388 | to STACK_BOUNDARY bits if the type requires it. This makes sure | |
1389 | that both before and after the layout of each argument, the Next | |
1390 | Stacked Argument Address (NSAA) will have a minimum alignment of | |
1391 | 8 bytes. */ | |
1392 | ||
1393 | static unsigned int | |
1394 | aarch64_function_arg_boundary (enum machine_mode mode, const_tree type) | |
1395 | { | |
1396 | unsigned int alignment = aarch64_function_arg_alignment (mode, type); | |
1397 | ||
1398 | if (alignment < PARM_BOUNDARY) | |
1399 | alignment = PARM_BOUNDARY; | |
1400 | if (alignment > STACK_BOUNDARY) | |
1401 | alignment = STACK_BOUNDARY; | |
1402 | return alignment; | |
1403 | } | |
1404 | ||
1405 | /* For use by FUNCTION_ARG_PADDING (MODE, TYPE). | |
1406 | ||
1407 | Return true if an argument passed on the stack should be padded upwards, | |
1408 | i.e. if the least-significant byte of the stack slot has useful data. | |
1409 | ||
1410 | Small aggregate types are placed in the lowest memory address. | |
1411 | ||
1412 | The related parameter passing rules are B.4, C.3, C.5 and C.14. */ | |
1413 | ||
1414 | bool | |
1415 | aarch64_pad_arg_upward (enum machine_mode mode, const_tree type) | |
1416 | { | |
1417 | /* On little-endian targets, the least significant byte of every stack | |
1418 | argument is passed at the lowest byte address of the stack slot. */ | |
1419 | if (!BYTES_BIG_ENDIAN) | |
1420 | return true; | |
1421 | ||
1422 | /* Otherwise, integral types and floating point types are padded downward: | |
1423 | the least significant byte of a stack argument is passed at the highest | |
1424 | byte address of the stack slot. */ | |
1425 | if (type | |
1426 | ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)) | |
1427 | : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode))) | |
1428 | return false; | |
1429 | ||
1430 | /* Everything else padded upward, i.e. data in first byte of stack slot. */ | |
1431 | return true; | |
1432 | } | |
1433 | ||
1434 | /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST). | |
1435 | ||
1436 | It specifies padding for the last (may also be the only) | |
1437 | element of a block move between registers and memory. If | |
1438 | assuming the block is in the memory, padding upward means that | |
1439 | the last element is padded after its highest significant byte, | |
1440 | while in downward padding, the last element is padded at the | |
1441 | its least significant byte side. | |
1442 | ||
1443 | Small aggregates and small complex types are always padded | |
1444 | upwards. | |
1445 | ||
1446 | We don't need to worry about homogeneous floating-point or | |
1447 | short-vector aggregates; their move is not affected by the | |
1448 | padding direction determined here. Regardless of endianness, | |
1449 | each element of such an aggregate is put in the least | |
1450 | significant bits of a fp/simd register. | |
1451 | ||
1452 | Return !BYTES_BIG_ENDIAN if the least significant byte of the | |
1453 | register has useful data, and return the opposite if the most | |
1454 | significant byte does. */ | |
1455 | ||
1456 | bool | |
1457 | aarch64_pad_reg_upward (enum machine_mode mode, const_tree type, | |
1458 | bool first ATTRIBUTE_UNUSED) | |
1459 | { | |
1460 | ||
1461 | /* Small composite types are always padded upward. */ | |
1462 | if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode)) | |
1463 | { | |
1464 | HOST_WIDE_INT size = (type ? int_size_in_bytes (type) | |
1465 | : GET_MODE_SIZE (mode)); | |
1466 | if (size < 2 * UNITS_PER_WORD) | |
1467 | return true; | |
1468 | } | |
1469 | ||
1470 | /* Otherwise, use the default padding. */ | |
1471 | return !BYTES_BIG_ENDIAN; | |
1472 | } | |
1473 | ||
1474 | static enum machine_mode | |
1475 | aarch64_libgcc_cmp_return_mode (void) | |
1476 | { | |
1477 | return SImode; | |
1478 | } | |
1479 | ||
1480 | static bool | |
1481 | aarch64_frame_pointer_required (void) | |
1482 | { | |
1483 | /* If the function contains dynamic stack allocations, we need to | |
1484 | use the frame pointer to access the static parts of the frame. */ | |
1485 | if (cfun->calls_alloca) | |
1486 | return true; | |
1487 | ||
1488 | /* We may have turned flag_omit_frame_pointer on in order to have this | |
1489 | function called; if we did, we also set the 'faked_omit_frame_pointer' flag | |
1490 | and we'll check it here. | |
1491 | If we really did set flag_omit_frame_pointer normally, then we return false | |
1492 | (no frame pointer required) in all cases. */ | |
1493 | ||
1494 | if (flag_omit_frame_pointer && !faked_omit_frame_pointer) | |
1495 | return false; | |
1496 | else if (flag_omit_leaf_frame_pointer) | |
1497 | return !crtl->is_leaf; | |
1498 | return true; | |
1499 | } | |
1500 | ||
1501 | /* Mark the registers that need to be saved by the callee and calculate | |
1502 | the size of the callee-saved registers area and frame record (both FP | |
1503 | and LR may be omitted). */ | |
1504 | static void | |
1505 | aarch64_layout_frame (void) | |
1506 | { | |
1507 | HOST_WIDE_INT offset = 0; | |
1508 | int regno; | |
1509 | ||
1510 | if (reload_completed && cfun->machine->frame.laid_out) | |
1511 | return; | |
1512 | ||
1513 | cfun->machine->frame.fp_lr_offset = 0; | |
1514 | ||
1515 | /* First mark all the registers that really need to be saved... */ | |
1516 | for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++) | |
1517 | cfun->machine->frame.reg_offset[regno] = -1; | |
1518 | ||
1519 | for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) | |
1520 | cfun->machine->frame.reg_offset[regno] = -1; | |
1521 | ||
1522 | /* ... that includes the eh data registers (if needed)... */ | |
1523 | if (crtl->calls_eh_return) | |
1524 | for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++) | |
1525 | cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0; | |
1526 | ||
1527 | /* ... and any callee saved register that dataflow says is live. */ | |
1528 | for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++) | |
1529 | if (df_regs_ever_live_p (regno) | |
1530 | && !call_used_regs[regno]) | |
1531 | cfun->machine->frame.reg_offset[regno] = 0; | |
1532 | ||
1533 | for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) | |
1534 | if (df_regs_ever_live_p (regno) | |
1535 | && !call_used_regs[regno]) | |
1536 | cfun->machine->frame.reg_offset[regno] = 0; | |
1537 | ||
1538 | if (frame_pointer_needed) | |
1539 | { | |
1540 | cfun->machine->frame.reg_offset[R30_REGNUM] = 0; | |
1541 | cfun->machine->frame.reg_offset[R29_REGNUM] = 0; | |
1542 | cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD; | |
1543 | } | |
1544 | ||
1545 | /* Now assign stack slots for them. */ | |
1546 | for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++) | |
1547 | if (cfun->machine->frame.reg_offset[regno] != -1) | |
1548 | { | |
1549 | cfun->machine->frame.reg_offset[regno] = offset; | |
1550 | offset += UNITS_PER_WORD; | |
1551 | } | |
1552 | ||
1553 | for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) | |
1554 | if (cfun->machine->frame.reg_offset[regno] != -1) | |
1555 | { | |
1556 | cfun->machine->frame.reg_offset[regno] = offset; | |
1557 | offset += UNITS_PER_WORD; | |
1558 | } | |
1559 | ||
1560 | if (frame_pointer_needed) | |
1561 | { | |
1562 | cfun->machine->frame.reg_offset[R29_REGNUM] = offset; | |
1563 | offset += UNITS_PER_WORD; | |
1564 | cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD; | |
1565 | } | |
1566 | ||
1567 | if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1) | |
1568 | { | |
1569 | cfun->machine->frame.reg_offset[R30_REGNUM] = offset; | |
1570 | offset += UNITS_PER_WORD; | |
1571 | cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD; | |
1572 | } | |
1573 | ||
1574 | cfun->machine->frame.padding0 = | |
1575 | (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset); | |
1576 | offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT); | |
1577 | ||
1578 | cfun->machine->frame.saved_regs_size = offset; | |
1579 | cfun->machine->frame.laid_out = true; | |
1580 | } | |
1581 | ||
1582 | /* Make the last instruction frame-related and note that it performs | |
1583 | the operation described by FRAME_PATTERN. */ | |
1584 | ||
1585 | static void | |
1586 | aarch64_set_frame_expr (rtx frame_pattern) | |
1587 | { | |
1588 | rtx insn; | |
1589 | ||
1590 | insn = get_last_insn (); | |
1591 | RTX_FRAME_RELATED_P (insn) = 1; | |
1592 | RTX_FRAME_RELATED_P (frame_pattern) = 1; | |
1593 | REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, | |
1594 | frame_pattern, | |
1595 | REG_NOTES (insn)); | |
1596 | } | |
1597 | ||
1598 | static bool | |
1599 | aarch64_register_saved_on_entry (int regno) | |
1600 | { | |
1601 | return cfun->machine->frame.reg_offset[regno] != -1; | |
1602 | } | |
1603 | ||
1604 | ||
1605 | static void | |
1606 | aarch64_save_or_restore_fprs (int start_offset, int increment, | |
1607 | bool restore, rtx base_rtx) | |
1608 | ||
1609 | { | |
1610 | unsigned regno; | |
1611 | unsigned regno2; | |
1612 | rtx insn; | |
1613 | rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM; | |
1614 | ||
1615 | ||
1616 | for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) | |
1617 | { | |
1618 | if (aarch64_register_saved_on_entry (regno)) | |
1619 | { | |
1620 | rtx mem; | |
1621 | mem = gen_mem_ref (DFmode, | |
1622 | plus_constant (Pmode, | |
1623 | base_rtx, | |
1624 | start_offset)); | |
1625 | ||
1626 | for (regno2 = regno + 1; | |
1627 | regno2 <= V31_REGNUM | |
1628 | && !aarch64_register_saved_on_entry (regno2); | |
1629 | regno2++) | |
1630 | { | |
1631 | /* Empty loop. */ | |
1632 | } | |
1633 | if (regno2 <= V31_REGNUM && | |
1634 | aarch64_register_saved_on_entry (regno2)) | |
1635 | { | |
1636 | rtx mem2; | |
1637 | /* Next highest register to be saved. */ | |
1638 | mem2 = gen_mem_ref (DFmode, | |
1639 | plus_constant | |
1640 | (Pmode, | |
1641 | base_rtx, | |
1642 | start_offset + increment)); | |
1643 | if (restore == false) | |
1644 | { | |
1645 | insn = emit_insn | |
1646 | ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno), | |
1647 | mem2, gen_rtx_REG (DFmode, regno2))); | |
1648 | ||
1649 | } | |
1650 | else | |
1651 | { | |
1652 | insn = emit_insn | |
1653 | ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem, | |
1654 | gen_rtx_REG (DFmode, regno2), mem2)); | |
1655 | ||
1656 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno)); | |
1657 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2)); | |
1658 | } | |
1659 | ||
1660 | /* The first part of a frame-related parallel insn | |
1661 | is always assumed to be relevant to the frame | |
1662 | calculations; subsequent parts, are only | |
1663 | frame-related if explicitly marked. */ | |
1664 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, | |
1665 | 1)) = 1; | |
1666 | regno = regno2; | |
1667 | start_offset += increment * 2; | |
1668 | } | |
1669 | else | |
1670 | { | |
1671 | if (restore == false) | |
1672 | insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno)); | |
1673 | else | |
1674 | { | |
1675 | insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem); | |
1676 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno)); | |
1677 | } | |
1678 | start_offset += increment; | |
1679 | } | |
1680 | RTX_FRAME_RELATED_P (insn) = 1; | |
1681 | } | |
1682 | } | |
1683 | ||
1684 | } | |
1685 | ||
1686 | ||
1687 | /* offset from the stack pointer of where the saves and | |
1688 | restore's have to happen. */ | |
1689 | static void | |
1690 | aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset, | |
1691 | bool restore) | |
1692 | { | |
1693 | rtx insn; | |
1694 | rtx base_rtx = stack_pointer_rtx; | |
1695 | HOST_WIDE_INT start_offset = offset; | |
1696 | HOST_WIDE_INT increment = UNITS_PER_WORD; | |
1697 | rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM; | |
1698 | unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM; | |
1699 | unsigned regno; | |
1700 | unsigned regno2; | |
1701 | ||
1702 | for (regno = R0_REGNUM; regno <= limit; regno++) | |
1703 | { | |
1704 | if (aarch64_register_saved_on_entry (regno)) | |
1705 | { | |
1706 | rtx mem; | |
1707 | mem = gen_mem_ref (Pmode, | |
1708 | plus_constant (Pmode, | |
1709 | base_rtx, | |
1710 | start_offset)); | |
1711 | ||
1712 | for (regno2 = regno + 1; | |
1713 | regno2 <= limit | |
1714 | && !aarch64_register_saved_on_entry (regno2); | |
1715 | regno2++) | |
1716 | { | |
1717 | /* Empty loop. */ | |
1718 | } | |
1719 | if (regno2 <= limit && | |
1720 | aarch64_register_saved_on_entry (regno2)) | |
1721 | { | |
1722 | rtx mem2; | |
1723 | /* Next highest register to be saved. */ | |
1724 | mem2 = gen_mem_ref (Pmode, | |
1725 | plus_constant | |
1726 | (Pmode, | |
1727 | base_rtx, | |
1728 | start_offset + increment)); | |
1729 | if (restore == false) | |
1730 | { | |
1731 | insn = emit_insn | |
1732 | ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno), | |
1733 | mem2, gen_rtx_REG (DImode, regno2))); | |
1734 | ||
1735 | } | |
1736 | else | |
1737 | { | |
1738 | insn = emit_insn | |
1739 | ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem, | |
1740 | gen_rtx_REG (DImode, regno2), mem2)); | |
1741 | ||
1742 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno)); | |
1743 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2)); | |
1744 | } | |
1745 | ||
1746 | /* The first part of a frame-related parallel insn | |
1747 | is always assumed to be relevant to the frame | |
1748 | calculations; subsequent parts, are only | |
1749 | frame-related if explicitly marked. */ | |
1750 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, | |
1751 | 1)) = 1; | |
1752 | regno = regno2; | |
1753 | start_offset += increment * 2; | |
1754 | } | |
1755 | else | |
1756 | { | |
1757 | if (restore == false) | |
1758 | insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno)); | |
1759 | else | |
1760 | { | |
1761 | insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem); | |
1762 | add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno)); | |
1763 | } | |
1764 | start_offset += increment; | |
1765 | } | |
1766 | RTX_FRAME_RELATED_P (insn) = 1; | |
1767 | } | |
1768 | } | |
1769 | ||
1770 | aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx); | |
1771 | ||
1772 | } | |
1773 | ||
1774 | /* AArch64 stack frames generated by this compiler look like: | |
1775 | ||
1776 | +-------------------------------+ | |
1777 | | | | |
1778 | | incoming stack arguments | | |
1779 | | | | |
1780 | +-------------------------------+ <-- arg_pointer_rtx | |
1781 | | | | |
1782 | | callee-allocated save area | | |
1783 | | for register varargs | | |
1784 | | | | |
1785 | +-------------------------------+ | |
1786 | | | | |
1787 | | local variables | | |
1788 | | | | |
1789 | +-------------------------------+ <-- frame_pointer_rtx | |
1790 | | | | |
1791 | | callee-saved registers | | |
1792 | | | | |
1793 | +-------------------------------+ | |
1794 | | LR' | | |
1795 | +-------------------------------+ | |
1796 | | FP' | | |
1797 | P +-------------------------------+ <-- hard_frame_pointer_rtx | |
1798 | | dynamic allocation | | |
1799 | +-------------------------------+ | |
1800 | | | | |
1801 | | outgoing stack arguments | | |
1802 | | | | |
1803 | +-------------------------------+ <-- stack_pointer_rtx | |
1804 | ||
1805 | Dynamic stack allocations such as alloca insert data at point P. | |
1806 | They decrease stack_pointer_rtx but leave frame_pointer_rtx and | |
1807 | hard_frame_pointer_rtx unchanged. */ | |
1808 | ||
1809 | /* Generate the prologue instructions for entry into a function. | |
1810 | Establish the stack frame by decreasing the stack pointer with a | |
1811 | properly calculated size and, if necessary, create a frame record | |
1812 | filled with the values of LR and previous frame pointer. The | |
1813 | current FP is also set up is it is in use. */ | |
1814 | ||
1815 | void | |
1816 | aarch64_expand_prologue (void) | |
1817 | { | |
1818 | /* sub sp, sp, #<frame_size> | |
1819 | stp {fp, lr}, [sp, #<frame_size> - 16] | |
1820 | add fp, sp, #<frame_size> - hardfp_offset | |
1821 | stp {cs_reg}, [fp, #-16] etc. | |
1822 | ||
1823 | sub sp, sp, <final_adjustment_if_any> | |
1824 | */ | |
1825 | HOST_WIDE_INT original_frame_size; /* local variables + vararg save */ | |
1826 | HOST_WIDE_INT frame_size, offset; | |
1827 | HOST_WIDE_INT fp_offset; /* FP offset from SP */ | |
1828 | rtx insn; | |
1829 | ||
1830 | aarch64_layout_frame (); | |
1831 | original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; | |
1832 | gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg) | |
1833 | && (cfun->stdarg || !cfun->machine->saved_varargs_size)); | |
1834 | frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size | |
1835 | + crtl->outgoing_args_size); | |
1836 | offset = frame_size = AARCH64_ROUND_UP (frame_size, | |
1837 | STACK_BOUNDARY / BITS_PER_UNIT); | |
1838 | ||
1839 | if (flag_stack_usage_info) | |
1840 | current_function_static_stack_size = frame_size; | |
1841 | ||
1842 | fp_offset = (offset | |
1843 | - original_frame_size | |
1844 | - cfun->machine->frame.saved_regs_size); | |
1845 | ||
44c0e7b9 | 1846 | /* Store pairs and load pairs have a range only -512 to 504. */ |
43e9d192 IB |
1847 | if (offset >= 512) |
1848 | { | |
1849 | /* When the frame has a large size, an initial decrease is done on | |
1850 | the stack pointer to jump over the callee-allocated save area for | |
1851 | register varargs, the local variable area and/or the callee-saved | |
1852 | register area. This will allow the pre-index write-back | |
1853 | store pair instructions to be used for setting up the stack frame | |
1854 | efficiently. */ | |
1855 | offset = original_frame_size + cfun->machine->frame.saved_regs_size; | |
1856 | if (offset >= 512) | |
1857 | offset = cfun->machine->frame.saved_regs_size; | |
1858 | ||
1859 | frame_size -= (offset + crtl->outgoing_args_size); | |
1860 | fp_offset = 0; | |
1861 | ||
1862 | if (frame_size >= 0x1000000) | |
1863 | { | |
1864 | rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM); | |
1865 | emit_move_insn (op0, GEN_INT (-frame_size)); | |
1866 | emit_insn (gen_add2_insn (stack_pointer_rtx, op0)); | |
1867 | aarch64_set_frame_expr (gen_rtx_SET | |
1868 | (Pmode, stack_pointer_rtx, | |
1869 | gen_rtx_PLUS (Pmode, | |
1870 | stack_pointer_rtx, | |
1871 | GEN_INT (-frame_size)))); | |
1872 | } | |
1873 | else if (frame_size > 0) | |
1874 | { | |
1875 | if ((frame_size & 0xfff) != frame_size) | |
1876 | { | |
1877 | insn = emit_insn (gen_add2_insn | |
1878 | (stack_pointer_rtx, | |
1879 | GEN_INT (-(frame_size | |
1880 | & ~(HOST_WIDE_INT)0xfff)))); | |
1881 | RTX_FRAME_RELATED_P (insn) = 1; | |
1882 | } | |
1883 | if ((frame_size & 0xfff) != 0) | |
1884 | { | |
1885 | insn = emit_insn (gen_add2_insn | |
1886 | (stack_pointer_rtx, | |
1887 | GEN_INT (-(frame_size | |
1888 | & (HOST_WIDE_INT)0xfff)))); | |
1889 | RTX_FRAME_RELATED_P (insn) = 1; | |
1890 | } | |
1891 | } | |
1892 | } | |
1893 | else | |
1894 | frame_size = -1; | |
1895 | ||
1896 | if (offset > 0) | |
1897 | { | |
1898 | /* Save the frame pointer and lr if the frame pointer is needed | |
1899 | first. Make the frame pointer point to the location of the | |
1900 | old frame pointer on the stack. */ | |
1901 | if (frame_pointer_needed) | |
1902 | { | |
1903 | rtx mem_fp, mem_lr; | |
1904 | ||
1905 | if (fp_offset) | |
1906 | { | |
1907 | insn = emit_insn (gen_add2_insn (stack_pointer_rtx, | |
1908 | GEN_INT (-offset))); | |
1909 | RTX_FRAME_RELATED_P (insn) = 1; | |
1910 | aarch64_set_frame_expr (gen_rtx_SET | |
1911 | (Pmode, stack_pointer_rtx, | |
1912 | gen_rtx_MINUS (Pmode, | |
1913 | stack_pointer_rtx, | |
1914 | GEN_INT (offset)))); | |
1915 | mem_fp = gen_frame_mem (DImode, | |
1916 | plus_constant (Pmode, | |
1917 | stack_pointer_rtx, | |
1918 | fp_offset)); | |
1919 | mem_lr = gen_frame_mem (DImode, | |
1920 | plus_constant (Pmode, | |
1921 | stack_pointer_rtx, | |
1922 | fp_offset | |
1923 | + UNITS_PER_WORD)); | |
1924 | insn = emit_insn (gen_store_pairdi (mem_fp, | |
1925 | hard_frame_pointer_rtx, | |
1926 | mem_lr, | |
1927 | gen_rtx_REG (DImode, | |
1928 | LR_REGNUM))); | |
1929 | } | |
1930 | else | |
1931 | { | |
1932 | insn = emit_insn (gen_storewb_pairdi_di | |
1933 | (stack_pointer_rtx, stack_pointer_rtx, | |
1934 | hard_frame_pointer_rtx, | |
1935 | gen_rtx_REG (DImode, LR_REGNUM), | |
1936 | GEN_INT (-offset), | |
1937 | GEN_INT (GET_MODE_SIZE (DImode) - offset))); | |
1938 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1; | |
1939 | } | |
1940 | ||
1941 | /* The first part of a frame-related parallel insn is always | |
1942 | assumed to be relevant to the frame calculations; | |
1943 | subsequent parts, are only frame-related if explicitly | |
1944 | marked. */ | |
1945 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; | |
1946 | RTX_FRAME_RELATED_P (insn) = 1; | |
1947 | ||
1948 | /* Set up frame pointer to point to the location of the | |
1949 | previous frame pointer on the stack. */ | |
1950 | insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx, | |
1951 | stack_pointer_rtx, | |
1952 | GEN_INT (fp_offset))); | |
1953 | aarch64_set_frame_expr (gen_rtx_SET | |
1954 | (Pmode, hard_frame_pointer_rtx, | |
1955 | gen_rtx_PLUS (Pmode, | |
1956 | stack_pointer_rtx, | |
1957 | GEN_INT (fp_offset)))); | |
1958 | RTX_FRAME_RELATED_P (insn) = 1; | |
1959 | insn = emit_insn (gen_stack_tie (stack_pointer_rtx, | |
1960 | hard_frame_pointer_rtx)); | |
1961 | } | |
1962 | else | |
1963 | { | |
1964 | insn = emit_insn (gen_add2_insn (stack_pointer_rtx, | |
1965 | GEN_INT (-offset))); | |
1966 | RTX_FRAME_RELATED_P (insn) = 1; | |
1967 | } | |
1968 | ||
1969 | aarch64_save_or_restore_callee_save_registers | |
1970 | (fp_offset + cfun->machine->frame.hardfp_offset, 0); | |
1971 | } | |
1972 | ||
1973 | /* when offset >= 512, | |
1974 | sub sp, sp, #<outgoing_args_size> */ | |
1975 | if (frame_size > -1) | |
1976 | { | |
1977 | if (crtl->outgoing_args_size > 0) | |
1978 | { | |
1979 | insn = emit_insn (gen_add2_insn | |
1980 | (stack_pointer_rtx, | |
1981 | GEN_INT (- crtl->outgoing_args_size))); | |
1982 | RTX_FRAME_RELATED_P (insn) = 1; | |
1983 | } | |
1984 | } | |
1985 | } | |
1986 | ||
1987 | /* Generate the epilogue instructions for returning from a function. */ | |
1988 | void | |
1989 | aarch64_expand_epilogue (bool for_sibcall) | |
1990 | { | |
1991 | HOST_WIDE_INT original_frame_size, frame_size, offset; | |
1992 | HOST_WIDE_INT fp_offset; | |
1993 | rtx insn; | |
44c0e7b9 | 1994 | rtx cfa_reg; |
43e9d192 IB |
1995 | |
1996 | aarch64_layout_frame (); | |
1997 | original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; | |
1998 | frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size | |
1999 | + crtl->outgoing_args_size); | |
2000 | offset = frame_size = AARCH64_ROUND_UP (frame_size, | |
2001 | STACK_BOUNDARY / BITS_PER_UNIT); | |
2002 | ||
2003 | fp_offset = (offset | |
2004 | - original_frame_size | |
2005 | - cfun->machine->frame.saved_regs_size); | |
2006 | ||
44c0e7b9 YZ |
2007 | cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx; |
2008 | ||
2009 | /* Store pairs and load pairs have a range only -512 to 504. */ | |
43e9d192 IB |
2010 | if (offset >= 512) |
2011 | { | |
2012 | offset = original_frame_size + cfun->machine->frame.saved_regs_size; | |
2013 | if (offset >= 512) | |
2014 | offset = cfun->machine->frame.saved_regs_size; | |
2015 | ||
2016 | frame_size -= (offset + crtl->outgoing_args_size); | |
2017 | fp_offset = 0; | |
2018 | if (!frame_pointer_needed && crtl->outgoing_args_size > 0) | |
2019 | { | |
2020 | insn = emit_insn (gen_add2_insn | |
2021 | (stack_pointer_rtx, | |
2022 | GEN_INT (crtl->outgoing_args_size))); | |
2023 | RTX_FRAME_RELATED_P (insn) = 1; | |
2024 | } | |
2025 | } | |
2026 | else | |
2027 | frame_size = -1; | |
2028 | ||
2029 | /* If there were outgoing arguments or we've done dynamic stack | |
2030 | allocation, then restore the stack pointer from the frame | |
2031 | pointer. This is at most one insn and more efficient than using | |
2032 | GCC's internal mechanism. */ | |
2033 | if (frame_pointer_needed | |
2034 | && (crtl->outgoing_args_size || cfun->calls_alloca)) | |
2035 | { | |
2036 | insn = emit_insn (gen_add3_insn (stack_pointer_rtx, | |
2037 | hard_frame_pointer_rtx, | |
2038 | GEN_INT (- fp_offset))); | |
2039 | RTX_FRAME_RELATED_P (insn) = 1; | |
44c0e7b9 YZ |
2040 | /* As SP is set to (FP - fp_offset), according to the rules in |
2041 | dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated | |
2042 | from the value of SP from now on. */ | |
2043 | cfa_reg = stack_pointer_rtx; | |
43e9d192 IB |
2044 | } |
2045 | ||
2046 | aarch64_save_or_restore_callee_save_registers | |
2047 | (fp_offset + cfun->machine->frame.hardfp_offset, 1); | |
2048 | ||
2049 | /* Restore the frame pointer and lr if the frame pointer is needed. */ | |
2050 | if (offset > 0) | |
2051 | { | |
2052 | if (frame_pointer_needed) | |
2053 | { | |
2054 | rtx mem_fp, mem_lr; | |
2055 | ||
2056 | if (fp_offset) | |
2057 | { | |
2058 | mem_fp = gen_frame_mem (DImode, | |
2059 | plus_constant (Pmode, | |
2060 | stack_pointer_rtx, | |
2061 | fp_offset)); | |
2062 | mem_lr = gen_frame_mem (DImode, | |
2063 | plus_constant (Pmode, | |
2064 | stack_pointer_rtx, | |
2065 | fp_offset | |
2066 | + UNITS_PER_WORD)); | |
2067 | insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx, | |
2068 | mem_fp, | |
2069 | gen_rtx_REG (DImode, | |
2070 | LR_REGNUM), | |
2071 | mem_lr)); | |
2072 | } | |
2073 | else | |
2074 | { | |
2075 | insn = emit_insn (gen_loadwb_pairdi_di | |
2076 | (stack_pointer_rtx, | |
2077 | stack_pointer_rtx, | |
2078 | hard_frame_pointer_rtx, | |
2079 | gen_rtx_REG (DImode, LR_REGNUM), | |
2080 | GEN_INT (offset), | |
2081 | GEN_INT (GET_MODE_SIZE (DImode) + offset))); | |
2082 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1; | |
44c0e7b9 YZ |
2083 | add_reg_note (insn, REG_CFA_ADJUST_CFA, |
2084 | (gen_rtx_SET (Pmode, stack_pointer_rtx, | |
dc2d3c67 YZ |
2085 | plus_constant (Pmode, cfa_reg, |
2086 | offset)))); | |
43e9d192 IB |
2087 | } |
2088 | ||
2089 | /* The first part of a frame-related parallel insn | |
2090 | is always assumed to be relevant to the frame | |
2091 | calculations; subsequent parts, are only | |
2092 | frame-related if explicitly marked. */ | |
2093 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; | |
2094 | RTX_FRAME_RELATED_P (insn) = 1; | |
2095 | add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx); | |
2096 | add_reg_note (insn, REG_CFA_RESTORE, | |
2097 | gen_rtx_REG (DImode, LR_REGNUM)); | |
2098 | ||
2099 | if (fp_offset) | |
2100 | { | |
2101 | insn = emit_insn (gen_add2_insn (stack_pointer_rtx, | |
2102 | GEN_INT (offset))); | |
2103 | RTX_FRAME_RELATED_P (insn) = 1; | |
2104 | } | |
2105 | } | |
43e9d192 IB |
2106 | else |
2107 | { | |
2108 | insn = emit_insn (gen_add2_insn (stack_pointer_rtx, | |
2109 | GEN_INT (offset))); | |
2110 | RTX_FRAME_RELATED_P (insn) = 1; | |
2111 | } | |
2112 | } | |
2113 | ||
2114 | /* Stack adjustment for exception handler. */ | |
2115 | if (crtl->calls_eh_return) | |
2116 | { | |
2117 | /* We need to unwind the stack by the offset computed by | |
2118 | EH_RETURN_STACKADJ_RTX. However, at this point the CFA is | |
2119 | based on SP. Ideally we would update the SP and define the | |
2120 | CFA along the lines of: | |
2121 | ||
2122 | SP = SP + EH_RETURN_STACKADJ_RTX | |
2123 | (regnote CFA = SP - EH_RETURN_STACKADJ_RTX) | |
2124 | ||
2125 | However the dwarf emitter only understands a constant | |
2126 | register offset. | |
2127 | ||
2128 | The solution choosen here is to use the otherwise unused IP0 | |
2129 | as a temporary register to hold the current SP value. The | |
2130 | CFA is described using IP0 then SP is modified. */ | |
2131 | ||
2132 | rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM); | |
2133 | ||
2134 | insn = emit_move_insn (ip0, stack_pointer_rtx); | |
2135 | add_reg_note (insn, REG_CFA_DEF_CFA, ip0); | |
2136 | RTX_FRAME_RELATED_P (insn) = 1; | |
2137 | ||
2138 | emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX)); | |
2139 | ||
2140 | /* Ensure the assignment to IP0 does not get optimized away. */ | |
2141 | emit_use (ip0); | |
2142 | } | |
2143 | ||
2144 | if (frame_size > -1) | |
2145 | { | |
2146 | if (frame_size >= 0x1000000) | |
2147 | { | |
2148 | rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM); | |
2149 | emit_move_insn (op0, GEN_INT (frame_size)); | |
2150 | emit_insn (gen_add2_insn (stack_pointer_rtx, op0)); | |
2151 | aarch64_set_frame_expr (gen_rtx_SET | |
2152 | (Pmode, stack_pointer_rtx, | |
2153 | gen_rtx_PLUS (Pmode, | |
2154 | stack_pointer_rtx, | |
2155 | GEN_INT (frame_size)))); | |
2156 | } | |
2157 | else if (frame_size > 0) | |
2158 | { | |
2159 | if ((frame_size & 0xfff) != 0) | |
2160 | { | |
2161 | insn = emit_insn (gen_add2_insn | |
2162 | (stack_pointer_rtx, | |
2163 | GEN_INT ((frame_size | |
2164 | & (HOST_WIDE_INT) 0xfff)))); | |
2165 | RTX_FRAME_RELATED_P (insn) = 1; | |
2166 | } | |
2167 | if ((frame_size & 0xfff) != frame_size) | |
2168 | { | |
2169 | insn = emit_insn (gen_add2_insn | |
2170 | (stack_pointer_rtx, | |
2171 | GEN_INT ((frame_size | |
2172 | & ~ (HOST_WIDE_INT) 0xfff)))); | |
2173 | RTX_FRAME_RELATED_P (insn) = 1; | |
2174 | } | |
2175 | } | |
2176 | ||
2177 | aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx, | |
2178 | gen_rtx_PLUS (Pmode, | |
2179 | stack_pointer_rtx, | |
2180 | GEN_INT (offset)))); | |
2181 | } | |
2182 | ||
2183 | emit_use (gen_rtx_REG (DImode, LR_REGNUM)); | |
2184 | if (!for_sibcall) | |
2185 | emit_jump_insn (ret_rtx); | |
2186 | } | |
2187 | ||
2188 | /* Return the place to copy the exception unwinding return address to. | |
2189 | This will probably be a stack slot, but could (in theory be the | |
2190 | return register). */ | |
2191 | rtx | |
2192 | aarch64_final_eh_return_addr (void) | |
2193 | { | |
2194 | HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset; | |
2195 | aarch64_layout_frame (); | |
2196 | original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; | |
2197 | frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size | |
2198 | + crtl->outgoing_args_size); | |
2199 | offset = frame_size = AARCH64_ROUND_UP (frame_size, | |
2200 | STACK_BOUNDARY / BITS_PER_UNIT); | |
2201 | fp_offset = offset | |
2202 | - original_frame_size | |
2203 | - cfun->machine->frame.saved_regs_size; | |
2204 | ||
2205 | if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0) | |
2206 | return gen_rtx_REG (DImode, LR_REGNUM); | |
2207 | ||
2208 | /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can | |
2209 | result in a store to save LR introduced by builtin_eh_return () being | |
2210 | incorrectly deleted because the alias is not detected. | |
2211 | So in the calculation of the address to copy the exception unwinding | |
2212 | return address to, we note 2 cases. | |
2213 | If FP is needed and the fp_offset is 0, it means that SP = FP and hence | |
2214 | we return a SP-relative location since all the addresses are SP-relative | |
2215 | in this case. This prevents the store from being optimized away. | |
2216 | If the fp_offset is not 0, then the addresses will be FP-relative and | |
2217 | therefore we return a FP-relative location. */ | |
2218 | ||
2219 | if (frame_pointer_needed) | |
2220 | { | |
2221 | if (fp_offset) | |
2222 | return gen_frame_mem (DImode, | |
2223 | plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD)); | |
2224 | else | |
2225 | return gen_frame_mem (DImode, | |
2226 | plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD)); | |
2227 | } | |
2228 | ||
2229 | /* If FP is not needed, we calculate the location of LR, which would be | |
2230 | at the top of the saved registers block. */ | |
2231 | ||
2232 | return gen_frame_mem (DImode, | |
2233 | plus_constant (Pmode, | |
2234 | stack_pointer_rtx, | |
2235 | fp_offset | |
2236 | + cfun->machine->frame.saved_regs_size | |
2237 | - 2 * UNITS_PER_WORD)); | |
2238 | } | |
2239 | ||
2240 | /* Output code to build up a constant in a register. */ | |
2241 | static void | |
d9600ae5 | 2242 | aarch64_build_constant (int regnum, HOST_WIDE_INT val) |
43e9d192 IB |
2243 | { |
2244 | if (aarch64_bitmask_imm (val, DImode)) | |
d9600ae5 | 2245 | emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val)); |
43e9d192 IB |
2246 | else |
2247 | { | |
2248 | int i; | |
2249 | int ncount = 0; | |
2250 | int zcount = 0; | |
2251 | HOST_WIDE_INT valp = val >> 16; | |
2252 | HOST_WIDE_INT valm; | |
2253 | HOST_WIDE_INT tval; | |
2254 | ||
2255 | for (i = 16; i < 64; i += 16) | |
2256 | { | |
2257 | valm = (valp & 0xffff); | |
2258 | ||
2259 | if (valm != 0) | |
2260 | ++ zcount; | |
2261 | ||
2262 | if (valm != 0xffff) | |
2263 | ++ ncount; | |
2264 | ||
2265 | valp >>= 16; | |
2266 | } | |
2267 | ||
2268 | /* zcount contains the number of additional MOVK instructions | |
2269 | required if the constant is built up with an initial MOVZ instruction, | |
2270 | while ncount is the number of MOVK instructions required if starting | |
2271 | with a MOVN instruction. Choose the sequence that yields the fewest | |
2272 | number of instructions, preferring MOVZ instructions when they are both | |
2273 | the same. */ | |
2274 | if (ncount < zcount) | |
2275 | { | |
d9600ae5 SN |
2276 | emit_move_insn (gen_rtx_REG (Pmode, regnum), |
2277 | GEN_INT ((~val) & 0xffff)); | |
43e9d192 IB |
2278 | tval = 0xffff; |
2279 | } | |
2280 | else | |
2281 | { | |
d9600ae5 SN |
2282 | emit_move_insn (gen_rtx_REG (Pmode, regnum), |
2283 | GEN_INT (val & 0xffff)); | |
43e9d192 IB |
2284 | tval = 0; |
2285 | } | |
2286 | ||
2287 | val >>= 16; | |
2288 | ||
2289 | for (i = 16; i < 64; i += 16) | |
2290 | { | |
2291 | if ((val & 0xffff) != tval) | |
d9600ae5 SN |
2292 | emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum), |
2293 | GEN_INT (i), GEN_INT (val & 0xffff))); | |
43e9d192 IB |
2294 | val >>= 16; |
2295 | } | |
2296 | } | |
2297 | } | |
2298 | ||
2299 | static void | |
d9600ae5 | 2300 | aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta) |
43e9d192 IB |
2301 | { |
2302 | HOST_WIDE_INT mdelta = delta; | |
d9600ae5 SN |
2303 | rtx this_rtx = gen_rtx_REG (Pmode, regnum); |
2304 | rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg); | |
43e9d192 IB |
2305 | |
2306 | if (mdelta < 0) | |
2307 | mdelta = -mdelta; | |
2308 | ||
2309 | if (mdelta >= 4096 * 4096) | |
2310 | { | |
d9600ae5 SN |
2311 | aarch64_build_constant (scratchreg, delta); |
2312 | emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx)); | |
43e9d192 IB |
2313 | } |
2314 | else if (mdelta > 0) | |
2315 | { | |
43e9d192 | 2316 | if (mdelta >= 4096) |
d9600ae5 SN |
2317 | { |
2318 | emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096))); | |
2319 | rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12)); | |
2320 | if (delta < 0) | |
2321 | emit_insn (gen_rtx_SET (Pmode, this_rtx, | |
2322 | gen_rtx_MINUS (Pmode, this_rtx, shift))); | |
2323 | else | |
2324 | emit_insn (gen_rtx_SET (Pmode, this_rtx, | |
2325 | gen_rtx_PLUS (Pmode, this_rtx, shift))); | |
2326 | } | |
43e9d192 | 2327 | if (mdelta % 4096 != 0) |
d9600ae5 SN |
2328 | { |
2329 | scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096)); | |
2330 | emit_insn (gen_rtx_SET (Pmode, this_rtx, | |
2331 | gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx))); | |
2332 | } | |
43e9d192 IB |
2333 | } |
2334 | } | |
2335 | ||
2336 | /* Output code to add DELTA to the first argument, and then jump | |
2337 | to FUNCTION. Used for C++ multiple inheritance. */ | |
2338 | static void | |
2339 | aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED, | |
2340 | HOST_WIDE_INT delta, | |
2341 | HOST_WIDE_INT vcall_offset, | |
2342 | tree function) | |
2343 | { | |
2344 | /* The this pointer is always in x0. Note that this differs from | |
2345 | Arm where the this pointer maybe bumped to r1 if r0 is required | |
2346 | to return a pointer to an aggregate. On AArch64 a result value | |
2347 | pointer will be in x8. */ | |
2348 | int this_regno = R0_REGNUM; | |
75f1d6fc | 2349 | rtx this_rtx, temp0, temp1, addr, insn, funexp; |
43e9d192 | 2350 | |
75f1d6fc SN |
2351 | reload_completed = 1; |
2352 | emit_note (NOTE_INSN_PROLOGUE_END); | |
43e9d192 IB |
2353 | |
2354 | if (vcall_offset == 0) | |
d9600ae5 | 2355 | aarch64_add_constant (this_regno, IP1_REGNUM, delta); |
43e9d192 IB |
2356 | else |
2357 | { | |
2358 | gcc_assert ((vcall_offset & 0x7) == 0); | |
2359 | ||
75f1d6fc SN |
2360 | this_rtx = gen_rtx_REG (Pmode, this_regno); |
2361 | temp0 = gen_rtx_REG (Pmode, IP0_REGNUM); | |
2362 | temp1 = gen_rtx_REG (Pmode, IP1_REGNUM); | |
43e9d192 | 2363 | |
75f1d6fc SN |
2364 | addr = this_rtx; |
2365 | if (delta != 0) | |
2366 | { | |
2367 | if (delta >= -256 && delta < 256) | |
2368 | addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx, | |
2369 | plus_constant (Pmode, this_rtx, delta)); | |
2370 | else | |
d9600ae5 | 2371 | aarch64_add_constant (this_regno, IP1_REGNUM, delta); |
43e9d192 IB |
2372 | } |
2373 | ||
75f1d6fc SN |
2374 | aarch64_emit_move (temp0, gen_rtx_MEM (Pmode, addr)); |
2375 | ||
43e9d192 | 2376 | if (vcall_offset >= -256 && vcall_offset < 32768) |
75f1d6fc | 2377 | addr = plus_constant (Pmode, temp0, vcall_offset); |
43e9d192 IB |
2378 | else |
2379 | { | |
d9600ae5 | 2380 | aarch64_build_constant (IP1_REGNUM, vcall_offset); |
75f1d6fc | 2381 | addr = gen_rtx_PLUS (Pmode, temp0, temp1); |
43e9d192 IB |
2382 | } |
2383 | ||
75f1d6fc SN |
2384 | aarch64_emit_move (temp1, gen_rtx_MEM (Pmode,addr)); |
2385 | emit_insn (gen_add2_insn (this_rtx, temp1)); | |
43e9d192 IB |
2386 | } |
2387 | ||
75f1d6fc SN |
2388 | /* Generate a tail call to the target function. */ |
2389 | if (!TREE_USED (function)) | |
2390 | { | |
2391 | assemble_external (function); | |
2392 | TREE_USED (function) = 1; | |
2393 | } | |
2394 | funexp = XEXP (DECL_RTL (function), 0); | |
2395 | funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); | |
2396 | insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX)); | |
2397 | SIBLING_CALL_P (insn) = 1; | |
2398 | ||
2399 | insn = get_insns (); | |
2400 | shorten_branches (insn); | |
2401 | final_start_function (insn, file, 1); | |
2402 | final (insn, file, 1); | |
43e9d192 | 2403 | final_end_function (); |
75f1d6fc SN |
2404 | |
2405 | /* Stop pretending to be a post-reload pass. */ | |
2406 | reload_completed = 0; | |
43e9d192 IB |
2407 | } |
2408 | ||
43e9d192 IB |
2409 | static int |
2410 | aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED) | |
2411 | { | |
2412 | if (GET_CODE (*x) == SYMBOL_REF) | |
2413 | return SYMBOL_REF_TLS_MODEL (*x) != 0; | |
2414 | ||
2415 | /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are | |
2416 | TLS offsets, not real symbol references. */ | |
2417 | if (GET_CODE (*x) == UNSPEC | |
2418 | && XINT (*x, 1) == UNSPEC_TLS) | |
2419 | return -1; | |
2420 | ||
2421 | return 0; | |
2422 | } | |
2423 | ||
2424 | static bool | |
2425 | aarch64_tls_referenced_p (rtx x) | |
2426 | { | |
2427 | if (!TARGET_HAVE_TLS) | |
2428 | return false; | |
2429 | ||
2430 | return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL); | |
2431 | } | |
2432 | ||
2433 | ||
2434 | static int | |
2435 | aarch64_bitmasks_cmp (const void *i1, const void *i2) | |
2436 | { | |
2437 | const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1; | |
2438 | const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2; | |
2439 | ||
2440 | if (*imm1 < *imm2) | |
2441 | return -1; | |
2442 | if (*imm1 > *imm2) | |
2443 | return +1; | |
2444 | return 0; | |
2445 | } | |
2446 | ||
2447 | ||
2448 | static void | |
2449 | aarch64_build_bitmask_table (void) | |
2450 | { | |
2451 | unsigned HOST_WIDE_INT mask, imm; | |
2452 | unsigned int log_e, e, s, r; | |
2453 | unsigned int nimms = 0; | |
2454 | ||
2455 | for (log_e = 1; log_e <= 6; log_e++) | |
2456 | { | |
2457 | e = 1 << log_e; | |
2458 | if (e == 64) | |
2459 | mask = ~(HOST_WIDE_INT) 0; | |
2460 | else | |
2461 | mask = ((HOST_WIDE_INT) 1 << e) - 1; | |
2462 | for (s = 1; s < e; s++) | |
2463 | { | |
2464 | for (r = 0; r < e; r++) | |
2465 | { | |
2466 | /* set s consecutive bits to 1 (s < 64) */ | |
2467 | imm = ((unsigned HOST_WIDE_INT)1 << s) - 1; | |
2468 | /* rotate right by r */ | |
2469 | if (r != 0) | |
2470 | imm = ((imm >> r) | (imm << (e - r))) & mask; | |
2471 | /* replicate the constant depending on SIMD size */ | |
2472 | switch (log_e) { | |
2473 | case 1: imm |= (imm << 2); | |
2474 | case 2: imm |= (imm << 4); | |
2475 | case 3: imm |= (imm << 8); | |
2476 | case 4: imm |= (imm << 16); | |
2477 | case 5: imm |= (imm << 32); | |
2478 | case 6: | |
2479 | break; | |
2480 | default: | |
2481 | gcc_unreachable (); | |
2482 | } | |
2483 | gcc_assert (nimms < AARCH64_NUM_BITMASKS); | |
2484 | aarch64_bitmasks[nimms++] = imm; | |
2485 | } | |
2486 | } | |
2487 | } | |
2488 | ||
2489 | gcc_assert (nimms == AARCH64_NUM_BITMASKS); | |
2490 | qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]), | |
2491 | aarch64_bitmasks_cmp); | |
2492 | } | |
2493 | ||
2494 | ||
2495 | /* Return true if val can be encoded as a 12-bit unsigned immediate with | |
2496 | a left shift of 0 or 12 bits. */ | |
2497 | bool | |
2498 | aarch64_uimm12_shift (HOST_WIDE_INT val) | |
2499 | { | |
2500 | return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val | |
2501 | || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val | |
2502 | ); | |
2503 | } | |
2504 | ||
2505 | ||
2506 | /* Return true if val is an immediate that can be loaded into a | |
2507 | register by a MOVZ instruction. */ | |
2508 | static bool | |
2509 | aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode) | |
2510 | { | |
2511 | if (GET_MODE_SIZE (mode) > 4) | |
2512 | { | |
2513 | if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val | |
2514 | || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val) | |
2515 | return 1; | |
2516 | } | |
2517 | else | |
2518 | { | |
2519 | /* Ignore sign extension. */ | |
2520 | val &= (HOST_WIDE_INT) 0xffffffff; | |
2521 | } | |
2522 | return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val | |
2523 | || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val); | |
2524 | } | |
2525 | ||
2526 | ||
2527 | /* Return true if val is a valid bitmask immediate. */ | |
2528 | bool | |
2529 | aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode) | |
2530 | { | |
2531 | if (GET_MODE_SIZE (mode) < 8) | |
2532 | { | |
2533 | /* Replicate bit pattern. */ | |
2534 | val &= (HOST_WIDE_INT) 0xffffffff; | |
2535 | val |= val << 32; | |
2536 | } | |
2537 | return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS, | |
2538 | sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL; | |
2539 | } | |
2540 | ||
2541 | ||
2542 | /* Return true if val is an immediate that can be loaded into a | |
2543 | register in a single instruction. */ | |
2544 | bool | |
2545 | aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode) | |
2546 | { | |
2547 | if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode)) | |
2548 | return 1; | |
2549 | return aarch64_bitmask_imm (val, mode); | |
2550 | } | |
2551 | ||
2552 | static bool | |
2553 | aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x) | |
2554 | { | |
2555 | rtx base, offset; | |
2556 | if (GET_CODE (x) == HIGH) | |
2557 | return true; | |
2558 | ||
2559 | split_const (x, &base, &offset); | |
2560 | if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF) | |
2561 | return (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR) != SYMBOL_FORCE_TO_MEM); | |
2562 | ||
2563 | return aarch64_tls_referenced_p (x); | |
2564 | } | |
2565 | ||
2566 | /* Return true if register REGNO is a valid index register. | |
2567 | STRICT_P is true if REG_OK_STRICT is in effect. */ | |
2568 | ||
2569 | bool | |
2570 | aarch64_regno_ok_for_index_p (int regno, bool strict_p) | |
2571 | { | |
2572 | if (!HARD_REGISTER_NUM_P (regno)) | |
2573 | { | |
2574 | if (!strict_p) | |
2575 | return true; | |
2576 | ||
2577 | if (!reg_renumber) | |
2578 | return false; | |
2579 | ||
2580 | regno = reg_renumber[regno]; | |
2581 | } | |
2582 | return GP_REGNUM_P (regno); | |
2583 | } | |
2584 | ||
2585 | /* Return true if register REGNO is a valid base register for mode MODE. | |
2586 | STRICT_P is true if REG_OK_STRICT is in effect. */ | |
2587 | ||
2588 | bool | |
2589 | aarch64_regno_ok_for_base_p (int regno, bool strict_p) | |
2590 | { | |
2591 | if (!HARD_REGISTER_NUM_P (regno)) | |
2592 | { | |
2593 | if (!strict_p) | |
2594 | return true; | |
2595 | ||
2596 | if (!reg_renumber) | |
2597 | return false; | |
2598 | ||
2599 | regno = reg_renumber[regno]; | |
2600 | } | |
2601 | ||
2602 | /* The fake registers will be eliminated to either the stack or | |
2603 | hard frame pointer, both of which are usually valid base registers. | |
2604 | Reload deals with the cases where the eliminated form isn't valid. */ | |
2605 | return (GP_REGNUM_P (regno) | |
2606 | || regno == SP_REGNUM | |
2607 | || regno == FRAME_POINTER_REGNUM | |
2608 | || regno == ARG_POINTER_REGNUM); | |
2609 | } | |
2610 | ||
2611 | /* Return true if X is a valid base register for mode MODE. | |
2612 | STRICT_P is true if REG_OK_STRICT is in effect. */ | |
2613 | ||
2614 | static bool | |
2615 | aarch64_base_register_rtx_p (rtx x, bool strict_p) | |
2616 | { | |
2617 | if (!strict_p && GET_CODE (x) == SUBREG) | |
2618 | x = SUBREG_REG (x); | |
2619 | ||
2620 | return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p)); | |
2621 | } | |
2622 | ||
2623 | /* Return true if address offset is a valid index. If it is, fill in INFO | |
2624 | appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */ | |
2625 | ||
2626 | static bool | |
2627 | aarch64_classify_index (struct aarch64_address_info *info, rtx x, | |
2628 | enum machine_mode mode, bool strict_p) | |
2629 | { | |
2630 | enum aarch64_address_type type; | |
2631 | rtx index; | |
2632 | int shift; | |
2633 | ||
2634 | /* (reg:P) */ | |
2635 | if ((REG_P (x) || GET_CODE (x) == SUBREG) | |
2636 | && GET_MODE (x) == Pmode) | |
2637 | { | |
2638 | type = ADDRESS_REG_REG; | |
2639 | index = x; | |
2640 | shift = 0; | |
2641 | } | |
2642 | /* (sign_extend:DI (reg:SI)) */ | |
2643 | else if ((GET_CODE (x) == SIGN_EXTEND | |
2644 | || GET_CODE (x) == ZERO_EXTEND) | |
2645 | && GET_MODE (x) == DImode | |
2646 | && GET_MODE (XEXP (x, 0)) == SImode) | |
2647 | { | |
2648 | type = (GET_CODE (x) == SIGN_EXTEND) | |
2649 | ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; | |
2650 | index = XEXP (x, 0); | |
2651 | shift = 0; | |
2652 | } | |
2653 | /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */ | |
2654 | else if (GET_CODE (x) == MULT | |
2655 | && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND | |
2656 | || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) | |
2657 | && GET_MODE (XEXP (x, 0)) == DImode | |
2658 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode | |
2659 | && CONST_INT_P (XEXP (x, 1))) | |
2660 | { | |
2661 | type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) | |
2662 | ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; | |
2663 | index = XEXP (XEXP (x, 0), 0); | |
2664 | shift = exact_log2 (INTVAL (XEXP (x, 1))); | |
2665 | } | |
2666 | /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */ | |
2667 | else if (GET_CODE (x) == ASHIFT | |
2668 | && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND | |
2669 | || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) | |
2670 | && GET_MODE (XEXP (x, 0)) == DImode | |
2671 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode | |
2672 | && CONST_INT_P (XEXP (x, 1))) | |
2673 | { | |
2674 | type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) | |
2675 | ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; | |
2676 | index = XEXP (XEXP (x, 0), 0); | |
2677 | shift = INTVAL (XEXP (x, 1)); | |
2678 | } | |
2679 | /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */ | |
2680 | else if ((GET_CODE (x) == SIGN_EXTRACT | |
2681 | || GET_CODE (x) == ZERO_EXTRACT) | |
2682 | && GET_MODE (x) == DImode | |
2683 | && GET_CODE (XEXP (x, 0)) == MULT | |
2684 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode | |
2685 | && CONST_INT_P (XEXP (XEXP (x, 0), 1))) | |
2686 | { | |
2687 | type = (GET_CODE (x) == SIGN_EXTRACT) | |
2688 | ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; | |
2689 | index = XEXP (XEXP (x, 0), 0); | |
2690 | shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1))); | |
2691 | if (INTVAL (XEXP (x, 1)) != 32 + shift | |
2692 | || INTVAL (XEXP (x, 2)) != 0) | |
2693 | shift = -1; | |
2694 | } | |
2695 | /* (and:DI (mult:DI (reg:DI) (const_int scale)) | |
2696 | (const_int 0xffffffff<<shift)) */ | |
2697 | else if (GET_CODE (x) == AND | |
2698 | && GET_MODE (x) == DImode | |
2699 | && GET_CODE (XEXP (x, 0)) == MULT | |
2700 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode | |
2701 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) | |
2702 | && CONST_INT_P (XEXP (x, 1))) | |
2703 | { | |
2704 | type = ADDRESS_REG_UXTW; | |
2705 | index = XEXP (XEXP (x, 0), 0); | |
2706 | shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1))); | |
2707 | if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift) | |
2708 | shift = -1; | |
2709 | } | |
2710 | /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */ | |
2711 | else if ((GET_CODE (x) == SIGN_EXTRACT | |
2712 | || GET_CODE (x) == ZERO_EXTRACT) | |
2713 | && GET_MODE (x) == DImode | |
2714 | && GET_CODE (XEXP (x, 0)) == ASHIFT | |
2715 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode | |
2716 | && CONST_INT_P (XEXP (XEXP (x, 0), 1))) | |
2717 | { | |
2718 | type = (GET_CODE (x) == SIGN_EXTRACT) | |
2719 | ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; | |
2720 | index = XEXP (XEXP (x, 0), 0); | |
2721 | shift = INTVAL (XEXP (XEXP (x, 0), 1)); | |
2722 | if (INTVAL (XEXP (x, 1)) != 32 + shift | |
2723 | || INTVAL (XEXP (x, 2)) != 0) | |
2724 | shift = -1; | |
2725 | } | |
2726 | /* (and:DI (ashift:DI (reg:DI) (const_int shift)) | |
2727 | (const_int 0xffffffff<<shift)) */ | |
2728 | else if (GET_CODE (x) == AND | |
2729 | && GET_MODE (x) == DImode | |
2730 | && GET_CODE (XEXP (x, 0)) == ASHIFT | |
2731 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode | |
2732 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) | |
2733 | && CONST_INT_P (XEXP (x, 1))) | |
2734 | { | |
2735 | type = ADDRESS_REG_UXTW; | |
2736 | index = XEXP (XEXP (x, 0), 0); | |
2737 | shift = INTVAL (XEXP (XEXP (x, 0), 1)); | |
2738 | if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift) | |
2739 | shift = -1; | |
2740 | } | |
2741 | /* (mult:P (reg:P) (const_int scale)) */ | |
2742 | else if (GET_CODE (x) == MULT | |
2743 | && GET_MODE (x) == Pmode | |
2744 | && GET_MODE (XEXP (x, 0)) == Pmode | |
2745 | && CONST_INT_P (XEXP (x, 1))) | |
2746 | { | |
2747 | type = ADDRESS_REG_REG; | |
2748 | index = XEXP (x, 0); | |
2749 | shift = exact_log2 (INTVAL (XEXP (x, 1))); | |
2750 | } | |
2751 | /* (ashift:P (reg:P) (const_int shift)) */ | |
2752 | else if (GET_CODE (x) == ASHIFT | |
2753 | && GET_MODE (x) == Pmode | |
2754 | && GET_MODE (XEXP (x, 0)) == Pmode | |
2755 | && CONST_INT_P (XEXP (x, 1))) | |
2756 | { | |
2757 | type = ADDRESS_REG_REG; | |
2758 | index = XEXP (x, 0); | |
2759 | shift = INTVAL (XEXP (x, 1)); | |
2760 | } | |
2761 | else | |
2762 | return false; | |
2763 | ||
2764 | if (GET_CODE (index) == SUBREG) | |
2765 | index = SUBREG_REG (index); | |
2766 | ||
2767 | if ((shift == 0 || | |
2768 | (shift > 0 && shift <= 3 | |
2769 | && (1 << shift) == GET_MODE_SIZE (mode))) | |
2770 | && REG_P (index) | |
2771 | && aarch64_regno_ok_for_index_p (REGNO (index), strict_p)) | |
2772 | { | |
2773 | info->type = type; | |
2774 | info->offset = index; | |
2775 | info->shift = shift; | |
2776 | return true; | |
2777 | } | |
2778 | ||
2779 | return false; | |
2780 | } | |
2781 | ||
2782 | static inline bool | |
2783 | offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset) | |
2784 | { | |
2785 | return (offset >= -64 * GET_MODE_SIZE (mode) | |
2786 | && offset < 64 * GET_MODE_SIZE (mode) | |
2787 | && offset % GET_MODE_SIZE (mode) == 0); | |
2788 | } | |
2789 | ||
2790 | static inline bool | |
2791 | offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED, | |
2792 | HOST_WIDE_INT offset) | |
2793 | { | |
2794 | return offset >= -256 && offset < 256; | |
2795 | } | |
2796 | ||
2797 | static inline bool | |
2798 | offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset) | |
2799 | { | |
2800 | return (offset >= 0 | |
2801 | && offset < 4096 * GET_MODE_SIZE (mode) | |
2802 | && offset % GET_MODE_SIZE (mode) == 0); | |
2803 | } | |
2804 | ||
2805 | /* Return true if X is a valid address for machine mode MODE. If it is, | |
2806 | fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in | |
2807 | effect. OUTER_CODE is PARALLEL for a load/store pair. */ | |
2808 | ||
2809 | static bool | |
2810 | aarch64_classify_address (struct aarch64_address_info *info, | |
2811 | rtx x, enum machine_mode mode, | |
2812 | RTX_CODE outer_code, bool strict_p) | |
2813 | { | |
2814 | enum rtx_code code = GET_CODE (x); | |
2815 | rtx op0, op1; | |
2816 | bool allow_reg_index_p = | |
2817 | outer_code != PARALLEL && GET_MODE_SIZE(mode) != 16; | |
2818 | ||
2819 | /* Don't support anything other than POST_INC or REG addressing for | |
2820 | AdvSIMD. */ | |
2821 | if (aarch64_vector_mode_p (mode) | |
2822 | && (code != POST_INC && code != REG)) | |
2823 | return false; | |
2824 | ||
2825 | switch (code) | |
2826 | { | |
2827 | case REG: | |
2828 | case SUBREG: | |
2829 | info->type = ADDRESS_REG_IMM; | |
2830 | info->base = x; | |
2831 | info->offset = const0_rtx; | |
2832 | return aarch64_base_register_rtx_p (x, strict_p); | |
2833 | ||
2834 | case PLUS: | |
2835 | op0 = XEXP (x, 0); | |
2836 | op1 = XEXP (x, 1); | |
2837 | if (GET_MODE_SIZE (mode) != 0 | |
2838 | && CONST_INT_P (op1) | |
2839 | && aarch64_base_register_rtx_p (op0, strict_p)) | |
2840 | { | |
2841 | HOST_WIDE_INT offset = INTVAL (op1); | |
2842 | ||
2843 | info->type = ADDRESS_REG_IMM; | |
2844 | info->base = op0; | |
2845 | info->offset = op1; | |
2846 | ||
2847 | /* TImode and TFmode values are allowed in both pairs of X | |
2848 | registers and individual Q registers. The available | |
2849 | address modes are: | |
2850 | X,X: 7-bit signed scaled offset | |
2851 | Q: 9-bit signed offset | |
2852 | We conservatively require an offset representable in either mode. | |
2853 | */ | |
2854 | if (mode == TImode || mode == TFmode) | |
2855 | return (offset_7bit_signed_scaled_p (mode, offset) | |
2856 | && offset_9bit_signed_unscaled_p (mode, offset)); | |
2857 | ||
2858 | if (outer_code == PARALLEL) | |
2859 | return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8) | |
2860 | && offset_7bit_signed_scaled_p (mode, offset)); | |
2861 | else | |
2862 | return (offset_9bit_signed_unscaled_p (mode, offset) | |
2863 | || offset_12bit_unsigned_scaled_p (mode, offset)); | |
2864 | } | |
2865 | ||
2866 | if (allow_reg_index_p) | |
2867 | { | |
2868 | /* Look for base + (scaled/extended) index register. */ | |
2869 | if (aarch64_base_register_rtx_p (op0, strict_p) | |
2870 | && aarch64_classify_index (info, op1, mode, strict_p)) | |
2871 | { | |
2872 | info->base = op0; | |
2873 | return true; | |
2874 | } | |
2875 | if (aarch64_base_register_rtx_p (op1, strict_p) | |
2876 | && aarch64_classify_index (info, op0, mode, strict_p)) | |
2877 | { | |
2878 | info->base = op1; | |
2879 | return true; | |
2880 | } | |
2881 | } | |
2882 | ||
2883 | return false; | |
2884 | ||
2885 | case POST_INC: | |
2886 | case POST_DEC: | |
2887 | case PRE_INC: | |
2888 | case PRE_DEC: | |
2889 | info->type = ADDRESS_REG_WB; | |
2890 | info->base = XEXP (x, 0); | |
2891 | info->offset = NULL_RTX; | |
2892 | return aarch64_base_register_rtx_p (info->base, strict_p); | |
2893 | ||
2894 | case POST_MODIFY: | |
2895 | case PRE_MODIFY: | |
2896 | info->type = ADDRESS_REG_WB; | |
2897 | info->base = XEXP (x, 0); | |
2898 | if (GET_CODE (XEXP (x, 1)) == PLUS | |
2899 | && CONST_INT_P (XEXP (XEXP (x, 1), 1)) | |
2900 | && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base) | |
2901 | && aarch64_base_register_rtx_p (info->base, strict_p)) | |
2902 | { | |
2903 | HOST_WIDE_INT offset; | |
2904 | info->offset = XEXP (XEXP (x, 1), 1); | |
2905 | offset = INTVAL (info->offset); | |
2906 | ||
2907 | /* TImode and TFmode values are allowed in both pairs of X | |
2908 | registers and individual Q registers. The available | |
2909 | address modes are: | |
2910 | X,X: 7-bit signed scaled offset | |
2911 | Q: 9-bit signed offset | |
2912 | We conservatively require an offset representable in either mode. | |
2913 | */ | |
2914 | if (mode == TImode || mode == TFmode) | |
2915 | return (offset_7bit_signed_scaled_p (mode, offset) | |
2916 | && offset_9bit_signed_unscaled_p (mode, offset)); | |
2917 | ||
2918 | if (outer_code == PARALLEL) | |
2919 | return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8) | |
2920 | && offset_7bit_signed_scaled_p (mode, offset)); | |
2921 | else | |
2922 | return offset_9bit_signed_unscaled_p (mode, offset); | |
2923 | } | |
2924 | return false; | |
2925 | ||
2926 | case CONST: | |
2927 | case SYMBOL_REF: | |
2928 | case LABEL_REF: | |
79517551 SN |
2929 | /* load literal: pc-relative constant pool entry. Only supported |
2930 | for SI mode or larger. */ | |
43e9d192 | 2931 | info->type = ADDRESS_SYMBOLIC; |
79517551 | 2932 | if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4) |
43e9d192 IB |
2933 | { |
2934 | rtx sym, addend; | |
2935 | ||
2936 | split_const (x, &sym, &addend); | |
2937 | return (GET_CODE (sym) == LABEL_REF | |
2938 | || (GET_CODE (sym) == SYMBOL_REF | |
2939 | && CONSTANT_POOL_ADDRESS_P (sym))); | |
2940 | } | |
2941 | return false; | |
2942 | ||
2943 | case LO_SUM: | |
2944 | info->type = ADDRESS_LO_SUM; | |
2945 | info->base = XEXP (x, 0); | |
2946 | info->offset = XEXP (x, 1); | |
2947 | if (allow_reg_index_p | |
2948 | && aarch64_base_register_rtx_p (info->base, strict_p)) | |
2949 | { | |
2950 | rtx sym, offs; | |
2951 | split_const (info->offset, &sym, &offs); | |
2952 | if (GET_CODE (sym) == SYMBOL_REF | |
2953 | && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM) | |
2954 | == SYMBOL_SMALL_ABSOLUTE)) | |
2955 | { | |
2956 | /* The symbol and offset must be aligned to the access size. */ | |
2957 | unsigned int align; | |
2958 | unsigned int ref_size; | |
2959 | ||
2960 | if (CONSTANT_POOL_ADDRESS_P (sym)) | |
2961 | align = GET_MODE_ALIGNMENT (get_pool_mode (sym)); | |
2962 | else if (TREE_CONSTANT_POOL_ADDRESS_P (sym)) | |
2963 | { | |
2964 | tree exp = SYMBOL_REF_DECL (sym); | |
2965 | align = TYPE_ALIGN (TREE_TYPE (exp)); | |
2966 | align = CONSTANT_ALIGNMENT (exp, align); | |
2967 | } | |
2968 | else if (SYMBOL_REF_DECL (sym)) | |
2969 | align = DECL_ALIGN (SYMBOL_REF_DECL (sym)); | |
2970 | else | |
2971 | align = BITS_PER_UNIT; | |
2972 | ||
2973 | ref_size = GET_MODE_SIZE (mode); | |
2974 | if (ref_size == 0) | |
2975 | ref_size = GET_MODE_SIZE (DImode); | |
2976 | ||
2977 | return ((INTVAL (offs) & (ref_size - 1)) == 0 | |
2978 | && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0); | |
2979 | } | |
2980 | } | |
2981 | return false; | |
2982 | ||
2983 | default: | |
2984 | return false; | |
2985 | } | |
2986 | } | |
2987 | ||
2988 | bool | |
2989 | aarch64_symbolic_address_p (rtx x) | |
2990 | { | |
2991 | rtx offset; | |
2992 | ||
2993 | split_const (x, &x, &offset); | |
2994 | return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF; | |
2995 | } | |
2996 | ||
2997 | /* Classify the base of symbolic expression X, given that X appears in | |
2998 | context CONTEXT. */ | |
2999 | static enum aarch64_symbol_type | |
3000 | aarch64_classify_symbolic_expression (rtx x, enum aarch64_symbol_context context) | |
3001 | { | |
3002 | rtx offset; | |
3003 | split_const (x, &x, &offset); | |
3004 | return aarch64_classify_symbol (x, context); | |
3005 | } | |
3006 | ||
3007 | ||
3008 | /* Return TRUE if X is a legitimate address for accessing memory in | |
3009 | mode MODE. */ | |
3010 | static bool | |
3011 | aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p) | |
3012 | { | |
3013 | struct aarch64_address_info addr; | |
3014 | ||
3015 | return aarch64_classify_address (&addr, x, mode, MEM, strict_p); | |
3016 | } | |
3017 | ||
3018 | /* Return TRUE if X is a legitimate address for accessing memory in | |
3019 | mode MODE. OUTER_CODE will be PARALLEL if this is a load/store | |
3020 | pair operation. */ | |
3021 | bool | |
3022 | aarch64_legitimate_address_p (enum machine_mode mode, rtx x, | |
3023 | RTX_CODE outer_code, bool strict_p) | |
3024 | { | |
3025 | struct aarch64_address_info addr; | |
3026 | ||
3027 | return aarch64_classify_address (&addr, x, mode, outer_code, strict_p); | |
3028 | } | |
3029 | ||
3030 | /* Return TRUE if rtx X is immediate constant 0.0 */ | |
3031 | bool | |
3520f7cc | 3032 | aarch64_float_const_zero_rtx_p (rtx x) |
43e9d192 IB |
3033 | { |
3034 | REAL_VALUE_TYPE r; | |
3035 | ||
3036 | if (GET_MODE (x) == VOIDmode) | |
3037 | return false; | |
3038 | ||
3039 | REAL_VALUE_FROM_CONST_DOUBLE (r, x); | |
3040 | if (REAL_VALUE_MINUS_ZERO (r)) | |
3041 | return !HONOR_SIGNED_ZEROS (GET_MODE (x)); | |
3042 | return REAL_VALUES_EQUAL (r, dconst0); | |
3043 | } | |
3044 | ||
70f09188 AP |
3045 | /* Return the fixed registers used for condition codes. */ |
3046 | ||
3047 | static bool | |
3048 | aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2) | |
3049 | { | |
3050 | *p1 = CC_REGNUM; | |
3051 | *p2 = INVALID_REGNUM; | |
3052 | return true; | |
3053 | } | |
3054 | ||
43e9d192 IB |
3055 | enum machine_mode |
3056 | aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y) | |
3057 | { | |
3058 | /* All floating point compares return CCFP if it is an equality | |
3059 | comparison, and CCFPE otherwise. */ | |
3060 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) | |
3061 | { | |
3062 | switch (code) | |
3063 | { | |
3064 | case EQ: | |
3065 | case NE: | |
3066 | case UNORDERED: | |
3067 | case ORDERED: | |
3068 | case UNLT: | |
3069 | case UNLE: | |
3070 | case UNGT: | |
3071 | case UNGE: | |
3072 | case UNEQ: | |
3073 | case LTGT: | |
3074 | return CCFPmode; | |
3075 | ||
3076 | case LT: | |
3077 | case LE: | |
3078 | case GT: | |
3079 | case GE: | |
3080 | return CCFPEmode; | |
3081 | ||
3082 | default: | |
3083 | gcc_unreachable (); | |
3084 | } | |
3085 | } | |
3086 | ||
3087 | if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode) | |
3088 | && y == const0_rtx | |
3089 | && (code == EQ || code == NE || code == LT || code == GE) | |
a8504f22 | 3090 | && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND)) |
43e9d192 IB |
3091 | return CC_NZmode; |
3092 | ||
3093 | /* A compare with a shifted operand. Because of canonicalization, | |
3094 | the comparison will have to be swapped when we emit the assembly | |
3095 | code. */ | |
3096 | if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode) | |
3097 | && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG) | |
3098 | && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT | |
3099 | || GET_CODE (x) == LSHIFTRT | |
3100 | || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)) | |
3101 | return CC_SWPmode; | |
3102 | ||
3103 | /* A compare of a mode narrower than SI mode against zero can be done | |
3104 | by extending the value in the comparison. */ | |
3105 | if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode) | |
3106 | && y == const0_rtx) | |
3107 | /* Only use sign-extension if we really need it. */ | |
3108 | return ((code == GT || code == GE || code == LE || code == LT) | |
3109 | ? CC_SESWPmode : CC_ZESWPmode); | |
3110 | ||
3111 | /* For everything else, return CCmode. */ | |
3112 | return CCmode; | |
3113 | } | |
3114 | ||
3115 | static unsigned | |
3116 | aarch64_get_condition_code (rtx x) | |
3117 | { | |
3118 | enum machine_mode mode = GET_MODE (XEXP (x, 0)); | |
3119 | enum rtx_code comp_code = GET_CODE (x); | |
3120 | ||
3121 | if (GET_MODE_CLASS (mode) != MODE_CC) | |
3122 | mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1)); | |
3123 | ||
3124 | switch (mode) | |
3125 | { | |
3126 | case CCFPmode: | |
3127 | case CCFPEmode: | |
3128 | switch (comp_code) | |
3129 | { | |
3130 | case GE: return AARCH64_GE; | |
3131 | case GT: return AARCH64_GT; | |
3132 | case LE: return AARCH64_LS; | |
3133 | case LT: return AARCH64_MI; | |
3134 | case NE: return AARCH64_NE; | |
3135 | case EQ: return AARCH64_EQ; | |
3136 | case ORDERED: return AARCH64_VC; | |
3137 | case UNORDERED: return AARCH64_VS; | |
3138 | case UNLT: return AARCH64_LT; | |
3139 | case UNLE: return AARCH64_LE; | |
3140 | case UNGT: return AARCH64_HI; | |
3141 | case UNGE: return AARCH64_PL; | |
3142 | default: gcc_unreachable (); | |
3143 | } | |
3144 | break; | |
3145 | ||
3146 | case CCmode: | |
3147 | switch (comp_code) | |
3148 | { | |
3149 | case NE: return AARCH64_NE; | |
3150 | case EQ: return AARCH64_EQ; | |
3151 | case GE: return AARCH64_GE; | |
3152 | case GT: return AARCH64_GT; | |
3153 | case LE: return AARCH64_LE; | |
3154 | case LT: return AARCH64_LT; | |
3155 | case GEU: return AARCH64_CS; | |
3156 | case GTU: return AARCH64_HI; | |
3157 | case LEU: return AARCH64_LS; | |
3158 | case LTU: return AARCH64_CC; | |
3159 | default: gcc_unreachable (); | |
3160 | } | |
3161 | break; | |
3162 | ||
3163 | case CC_SWPmode: | |
3164 | case CC_ZESWPmode: | |
3165 | case CC_SESWPmode: | |
3166 | switch (comp_code) | |
3167 | { | |
3168 | case NE: return AARCH64_NE; | |
3169 | case EQ: return AARCH64_EQ; | |
3170 | case GE: return AARCH64_LE; | |
3171 | case GT: return AARCH64_LT; | |
3172 | case LE: return AARCH64_GE; | |
3173 | case LT: return AARCH64_GT; | |
3174 | case GEU: return AARCH64_LS; | |
3175 | case GTU: return AARCH64_CC; | |
3176 | case LEU: return AARCH64_CS; | |
3177 | case LTU: return AARCH64_HI; | |
3178 | default: gcc_unreachable (); | |
3179 | } | |
3180 | break; | |
3181 | ||
3182 | case CC_NZmode: | |
3183 | switch (comp_code) | |
3184 | { | |
3185 | case NE: return AARCH64_NE; | |
3186 | case EQ: return AARCH64_EQ; | |
3187 | case GE: return AARCH64_PL; | |
3188 | case LT: return AARCH64_MI; | |
3189 | default: gcc_unreachable (); | |
3190 | } | |
3191 | break; | |
3192 | ||
3193 | default: | |
3194 | gcc_unreachable (); | |
3195 | break; | |
3196 | } | |
3197 | } | |
3198 | ||
3199 | static unsigned | |
3200 | bit_count (unsigned HOST_WIDE_INT value) | |
3201 | { | |
3202 | unsigned count = 0; | |
3203 | ||
3204 | while (value) | |
3205 | { | |
3206 | count++; | |
3207 | value &= value - 1; | |
3208 | } | |
3209 | ||
3210 | return count; | |
3211 | } | |
3212 | ||
3213 | void | |
3214 | aarch64_print_operand (FILE *f, rtx x, char code) | |
3215 | { | |
3216 | switch (code) | |
3217 | { | |
3218 | case 'e': | |
3219 | /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */ | |
3220 | { | |
3221 | int n; | |
3222 | ||
3223 | if (GET_CODE (x) != CONST_INT | |
3224 | || (n = exact_log2 (INTVAL (x) & ~7)) <= 0) | |
3225 | { | |
3226 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3227 | return; | |
3228 | } | |
3229 | ||
3230 | switch (n) | |
3231 | { | |
3232 | case 3: | |
3233 | fputc ('b', f); | |
3234 | break; | |
3235 | case 4: | |
3236 | fputc ('h', f); | |
3237 | break; | |
3238 | case 5: | |
3239 | fputc ('w', f); | |
3240 | break; | |
3241 | default: | |
3242 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3243 | return; | |
3244 | } | |
3245 | } | |
3246 | break; | |
3247 | ||
3248 | case 'p': | |
3249 | { | |
3250 | int n; | |
3251 | ||
3252 | /* Print N such that 2^N == X. */ | |
3253 | if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0) | |
3254 | { | |
3255 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3256 | return; | |
3257 | } | |
3258 | ||
3259 | asm_fprintf (f, "%d", n); | |
3260 | } | |
3261 | break; | |
3262 | ||
3263 | case 'P': | |
3264 | /* Print the number of non-zero bits in X (a const_int). */ | |
3265 | if (GET_CODE (x) != CONST_INT) | |
3266 | { | |
3267 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3268 | return; | |
3269 | } | |
3270 | ||
3271 | asm_fprintf (f, "%u", bit_count (INTVAL (x))); | |
3272 | break; | |
3273 | ||
3274 | case 'H': | |
3275 | /* Print the higher numbered register of a pair (TImode) of regs. */ | |
3276 | if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1)) | |
3277 | { | |
3278 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3279 | return; | |
3280 | } | |
3281 | ||
01a3a324 | 3282 | asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]); |
43e9d192 IB |
3283 | break; |
3284 | ||
3285 | case 'Q': | |
3286 | /* Print the least significant register of a pair (TImode) of regs. */ | |
3287 | if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1)) | |
3288 | { | |
3289 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3290 | return; | |
3291 | } | |
01a3a324 | 3292 | asm_fprintf (f, "%s", reg_names [REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)]); |
43e9d192 IB |
3293 | break; |
3294 | ||
3295 | case 'R': | |
3296 | /* Print the most significant register of a pair (TImode) of regs. */ | |
3297 | if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1)) | |
3298 | { | |
3299 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3300 | return; | |
3301 | } | |
01a3a324 | 3302 | asm_fprintf (f, "%s", reg_names [REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)]); |
43e9d192 IB |
3303 | break; |
3304 | ||
3305 | case 'm': | |
3306 | /* Print a condition (eq, ne, etc). */ | |
3307 | ||
3308 | /* CONST_TRUE_RTX means always -- that's the default. */ | |
3309 | if (x == const_true_rtx) | |
3310 | return; | |
3311 | ||
3312 | if (!COMPARISON_P (x)) | |
3313 | { | |
3314 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3315 | return; | |
3316 | } | |
3317 | ||
3318 | fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f); | |
3319 | break; | |
3320 | ||
3321 | case 'M': | |
3322 | /* Print the inverse of a condition (eq <-> ne, etc). */ | |
3323 | ||
3324 | /* CONST_TRUE_RTX means never -- that's the default. */ | |
3325 | if (x == const_true_rtx) | |
3326 | { | |
3327 | fputs ("nv", f); | |
3328 | return; | |
3329 | } | |
3330 | ||
3331 | if (!COMPARISON_P (x)) | |
3332 | { | |
3333 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3334 | return; | |
3335 | } | |
3336 | ||
3337 | fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE | |
3338 | (aarch64_get_condition_code (x))], f); | |
3339 | break; | |
3340 | ||
3341 | case 'b': | |
3342 | case 'h': | |
3343 | case 's': | |
3344 | case 'd': | |
3345 | case 'q': | |
3346 | /* Print a scalar FP/SIMD register name. */ | |
3347 | if (!REG_P (x) || !FP_REGNUM_P (REGNO (x))) | |
3348 | { | |
3349 | output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code); | |
3350 | return; | |
3351 | } | |
50ce6f88 | 3352 | asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM); |
43e9d192 IB |
3353 | break; |
3354 | ||
3355 | case 'S': | |
3356 | case 'T': | |
3357 | case 'U': | |
3358 | case 'V': | |
3359 | /* Print the first FP/SIMD register name in a list. */ | |
3360 | if (!REG_P (x) || !FP_REGNUM_P (REGNO (x))) | |
3361 | { | |
3362 | output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code); | |
3363 | return; | |
3364 | } | |
50ce6f88 | 3365 | asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S')); |
43e9d192 IB |
3366 | break; |
3367 | ||
a05c0ddf IB |
3368 | case 'X': |
3369 | /* Print integer constant in hex. */ | |
3370 | if (GET_CODE (x) != CONST_INT) | |
3371 | { | |
3372 | output_operand_lossage ("invalid operand for '%%%c'", code); | |
3373 | return; | |
3374 | } | |
3375 | asm_fprintf (f, "0x%x", UINTVAL (x)); | |
3376 | break; | |
3377 | ||
43e9d192 IB |
3378 | case 'w': |
3379 | case 'x': | |
3380 | /* Print a general register name or the zero register (32-bit or | |
3381 | 64-bit). */ | |
3520f7cc JG |
3382 | if (x == const0_rtx |
3383 | || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x))) | |
43e9d192 | 3384 | { |
50ce6f88 | 3385 | asm_fprintf (f, "%czr", code); |
43e9d192 IB |
3386 | break; |
3387 | } | |
3388 | ||
3389 | if (REG_P (x) && GP_REGNUM_P (REGNO (x))) | |
3390 | { | |
50ce6f88 | 3391 | asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM); |
43e9d192 IB |
3392 | break; |
3393 | } | |
3394 | ||
3395 | if (REG_P (x) && REGNO (x) == SP_REGNUM) | |
3396 | { | |
50ce6f88 | 3397 | asm_fprintf (f, "%ssp", code == 'w' ? "w" : ""); |
43e9d192 IB |
3398 | break; |
3399 | } | |
3400 | ||
3401 | /* Fall through */ | |
3402 | ||
3403 | case 0: | |
3404 | /* Print a normal operand, if it's a general register, then we | |
3405 | assume DImode. */ | |
3406 | if (x == NULL) | |
3407 | { | |
3408 | output_operand_lossage ("missing operand"); | |
3409 | return; | |
3410 | } | |
3411 | ||
3412 | switch (GET_CODE (x)) | |
3413 | { | |
3414 | case REG: | |
01a3a324 | 3415 | asm_fprintf (f, "%s", reg_names [REGNO (x)]); |
43e9d192 IB |
3416 | break; |
3417 | ||
3418 | case MEM: | |
3419 | aarch64_memory_reference_mode = GET_MODE (x); | |
3420 | output_address (XEXP (x, 0)); | |
3421 | break; | |
3422 | ||
3423 | case LABEL_REF: | |
3424 | case SYMBOL_REF: | |
3425 | output_addr_const (asm_out_file, x); | |
3426 | break; | |
3427 | ||
3428 | case CONST_INT: | |
3429 | asm_fprintf (f, "%wd", INTVAL (x)); | |
3430 | break; | |
3431 | ||
3432 | case CONST_VECTOR: | |
3520f7cc JG |
3433 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT) |
3434 | { | |
3435 | gcc_assert (aarch64_const_vec_all_same_int_p (x, | |
3436 | HOST_WIDE_INT_MIN, | |
3437 | HOST_WIDE_INT_MAX)); | |
3438 | asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0))); | |
3439 | } | |
3440 | else if (aarch64_simd_imm_zero_p (x, GET_MODE (x))) | |
3441 | { | |
3442 | fputc ('0', f); | |
3443 | } | |
3444 | else | |
3445 | gcc_unreachable (); | |
43e9d192 IB |
3446 | break; |
3447 | ||
3520f7cc JG |
3448 | case CONST_DOUBLE: |
3449 | /* CONST_DOUBLE can represent a double-width integer. | |
3450 | In this case, the mode of x is VOIDmode. */ | |
3451 | if (GET_MODE (x) == VOIDmode) | |
3452 | ; /* Do Nothing. */ | |
3453 | else if (aarch64_float_const_zero_rtx_p (x)) | |
3454 | { | |
3455 | fputc ('0', f); | |
3456 | break; | |
3457 | } | |
3458 | else if (aarch64_float_const_representable_p (x)) | |
3459 | { | |
3460 | #define buf_size 20 | |
3461 | char float_buf[buf_size] = {'\0'}; | |
3462 | REAL_VALUE_TYPE r; | |
3463 | REAL_VALUE_FROM_CONST_DOUBLE (r, x); | |
3464 | real_to_decimal_for_mode (float_buf, &r, | |
3465 | buf_size, buf_size, | |
3466 | 1, GET_MODE (x)); | |
3467 | asm_fprintf (asm_out_file, "%s", float_buf); | |
3468 | break; | |
3469 | #undef buf_size | |
3470 | } | |
3471 | output_operand_lossage ("invalid constant"); | |
3472 | return; | |
43e9d192 IB |
3473 | default: |
3474 | output_operand_lossage ("invalid operand"); | |
3475 | return; | |
3476 | } | |
3477 | break; | |
3478 | ||
3479 | case 'A': | |
3480 | if (GET_CODE (x) == HIGH) | |
3481 | x = XEXP (x, 0); | |
3482 | ||
3483 | switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) | |
3484 | { | |
3485 | case SYMBOL_SMALL_GOT: | |
3486 | asm_fprintf (asm_out_file, ":got:"); | |
3487 | break; | |
3488 | ||
3489 | case SYMBOL_SMALL_TLSGD: | |
3490 | asm_fprintf (asm_out_file, ":tlsgd:"); | |
3491 | break; | |
3492 | ||
3493 | case SYMBOL_SMALL_TLSDESC: | |
3494 | asm_fprintf (asm_out_file, ":tlsdesc:"); | |
3495 | break; | |
3496 | ||
3497 | case SYMBOL_SMALL_GOTTPREL: | |
3498 | asm_fprintf (asm_out_file, ":gottprel:"); | |
3499 | break; | |
3500 | ||
3501 | case SYMBOL_SMALL_TPREL: | |
3502 | asm_fprintf (asm_out_file, ":tprel:"); | |
3503 | break; | |
3504 | ||
3505 | default: | |
3506 | break; | |
3507 | } | |
3508 | output_addr_const (asm_out_file, x); | |
3509 | break; | |
3510 | ||
3511 | case 'L': | |
3512 | switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) | |
3513 | { | |
3514 | case SYMBOL_SMALL_GOT: | |
3515 | asm_fprintf (asm_out_file, ":lo12:"); | |
3516 | break; | |
3517 | ||
3518 | case SYMBOL_SMALL_TLSGD: | |
3519 | asm_fprintf (asm_out_file, ":tlsgd_lo12:"); | |
3520 | break; | |
3521 | ||
3522 | case SYMBOL_SMALL_TLSDESC: | |
3523 | asm_fprintf (asm_out_file, ":tlsdesc_lo12:"); | |
3524 | break; | |
3525 | ||
3526 | case SYMBOL_SMALL_GOTTPREL: | |
3527 | asm_fprintf (asm_out_file, ":gottprel_lo12:"); | |
3528 | break; | |
3529 | ||
3530 | case SYMBOL_SMALL_TPREL: | |
3531 | asm_fprintf (asm_out_file, ":tprel_lo12_nc:"); | |
3532 | break; | |
3533 | ||
3534 | default: | |
3535 | break; | |
3536 | } | |
3537 | output_addr_const (asm_out_file, x); | |
3538 | break; | |
3539 | ||
3540 | case 'G': | |
3541 | ||
3542 | switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) | |
3543 | { | |
3544 | case SYMBOL_SMALL_TPREL: | |
3545 | asm_fprintf (asm_out_file, ":tprel_hi12:"); | |
3546 | break; | |
3547 | default: | |
3548 | break; | |
3549 | } | |
3550 | output_addr_const (asm_out_file, x); | |
3551 | break; | |
3552 | ||
3553 | default: | |
3554 | output_operand_lossage ("invalid operand prefix '%%%c'", code); | |
3555 | return; | |
3556 | } | |
3557 | } | |
3558 | ||
3559 | void | |
3560 | aarch64_print_operand_address (FILE *f, rtx x) | |
3561 | { | |
3562 | struct aarch64_address_info addr; | |
3563 | ||
3564 | if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode, | |
3565 | MEM, true)) | |
3566 | switch (addr.type) | |
3567 | { | |
3568 | case ADDRESS_REG_IMM: | |
3569 | if (addr.offset == const0_rtx) | |
01a3a324 | 3570 | asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]); |
43e9d192 | 3571 | else |
01a3a324 | 3572 | asm_fprintf (f, "[%s,%wd]", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3573 | INTVAL (addr.offset)); |
3574 | return; | |
3575 | ||
3576 | case ADDRESS_REG_REG: | |
3577 | if (addr.shift == 0) | |
01a3a324 N |
3578 | asm_fprintf (f, "[%s,%s]", reg_names [REGNO (addr.base)], |
3579 | reg_names [REGNO (addr.offset)]); | |
43e9d192 | 3580 | else |
01a3a324 N |
3581 | asm_fprintf (f, "[%s,%s,lsl %u]", reg_names [REGNO (addr.base)], |
3582 | reg_names [REGNO (addr.offset)], addr.shift); | |
43e9d192 IB |
3583 | return; |
3584 | ||
3585 | case ADDRESS_REG_UXTW: | |
3586 | if (addr.shift == 0) | |
01a3a324 | 3587 | asm_fprintf (f, "[%s,w%d,uxtw]", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3588 | REGNO (addr.offset) - R0_REGNUM); |
3589 | else | |
01a3a324 | 3590 | asm_fprintf (f, "[%s,w%d,uxtw %u]", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3591 | REGNO (addr.offset) - R0_REGNUM, addr.shift); |
3592 | return; | |
3593 | ||
3594 | case ADDRESS_REG_SXTW: | |
3595 | if (addr.shift == 0) | |
01a3a324 | 3596 | asm_fprintf (f, "[%s,w%d,sxtw]", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3597 | REGNO (addr.offset) - R0_REGNUM); |
3598 | else | |
01a3a324 | 3599 | asm_fprintf (f, "[%s,w%d,sxtw %u]", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3600 | REGNO (addr.offset) - R0_REGNUM, addr.shift); |
3601 | return; | |
3602 | ||
3603 | case ADDRESS_REG_WB: | |
3604 | switch (GET_CODE (x)) | |
3605 | { | |
3606 | case PRE_INC: | |
01a3a324 | 3607 | asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3608 | GET_MODE_SIZE (aarch64_memory_reference_mode)); |
3609 | return; | |
3610 | case POST_INC: | |
01a3a324 | 3611 | asm_fprintf (f, "[%s],%d", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3612 | GET_MODE_SIZE (aarch64_memory_reference_mode)); |
3613 | return; | |
3614 | case PRE_DEC: | |
01a3a324 | 3615 | asm_fprintf (f, "[%s,-%d]!", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3616 | GET_MODE_SIZE (aarch64_memory_reference_mode)); |
3617 | return; | |
3618 | case POST_DEC: | |
01a3a324 | 3619 | asm_fprintf (f, "[%s],-%d", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3620 | GET_MODE_SIZE (aarch64_memory_reference_mode)); |
3621 | return; | |
3622 | case PRE_MODIFY: | |
01a3a324 | 3623 | asm_fprintf (f, "[%s,%wd]!", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3624 | INTVAL (addr.offset)); |
3625 | return; | |
3626 | case POST_MODIFY: | |
01a3a324 | 3627 | asm_fprintf (f, "[%s],%wd", reg_names [REGNO (addr.base)], |
43e9d192 IB |
3628 | INTVAL (addr.offset)); |
3629 | return; | |
3630 | default: | |
3631 | break; | |
3632 | } | |
3633 | break; | |
3634 | ||
3635 | case ADDRESS_LO_SUM: | |
01a3a324 | 3636 | asm_fprintf (f, "[%s,#:lo12:", reg_names [REGNO (addr.base)]); |
43e9d192 IB |
3637 | output_addr_const (f, addr.offset); |
3638 | asm_fprintf (f, "]"); | |
3639 | return; | |
3640 | ||
3641 | case ADDRESS_SYMBOLIC: | |
3642 | break; | |
3643 | } | |
3644 | ||
3645 | output_addr_const (f, x); | |
3646 | } | |
3647 | ||
3648 | void | |
3649 | aarch64_function_profiler (FILE *f ATTRIBUTE_UNUSED, | |
3650 | int labelno ATTRIBUTE_UNUSED) | |
3651 | { | |
3652 | sorry ("function profiling"); | |
3653 | } | |
3654 | ||
3655 | bool | |
3656 | aarch64_label_mentioned_p (rtx x) | |
3657 | { | |
3658 | const char *fmt; | |
3659 | int i; | |
3660 | ||
3661 | if (GET_CODE (x) == LABEL_REF) | |
3662 | return true; | |
3663 | ||
3664 | /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the | |
3665 | referencing instruction, but they are constant offsets, not | |
3666 | symbols. */ | |
3667 | if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS) | |
3668 | return false; | |
3669 | ||
3670 | fmt = GET_RTX_FORMAT (GET_CODE (x)); | |
3671 | for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) | |
3672 | { | |
3673 | if (fmt[i] == 'E') | |
3674 | { | |
3675 | int j; | |
3676 | ||
3677 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
3678 | if (aarch64_label_mentioned_p (XVECEXP (x, i, j))) | |
3679 | return 1; | |
3680 | } | |
3681 | else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i))) | |
3682 | return 1; | |
3683 | } | |
3684 | ||
3685 | return 0; | |
3686 | } | |
3687 | ||
3688 | /* Implement REGNO_REG_CLASS. */ | |
3689 | ||
3690 | enum reg_class | |
3691 | aarch64_regno_regclass (unsigned regno) | |
3692 | { | |
3693 | if (GP_REGNUM_P (regno)) | |
3694 | return CORE_REGS; | |
3695 | ||
3696 | if (regno == SP_REGNUM) | |
3697 | return STACK_REG; | |
3698 | ||
3699 | if (regno == FRAME_POINTER_REGNUM | |
3700 | || regno == ARG_POINTER_REGNUM) | |
3701 | return CORE_REGS; | |
3702 | ||
3703 | if (FP_REGNUM_P (regno)) | |
3704 | return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS; | |
3705 | ||
3706 | return NO_REGS; | |
3707 | } | |
3708 | ||
3709 | /* Try a machine-dependent way of reloading an illegitimate address | |
3710 | operand. If we find one, push the reload and return the new rtx. */ | |
3711 | ||
3712 | rtx | |
3713 | aarch64_legitimize_reload_address (rtx *x_p, | |
3714 | enum machine_mode mode, | |
3715 | int opnum, int type, | |
3716 | int ind_levels ATTRIBUTE_UNUSED) | |
3717 | { | |
3718 | rtx x = *x_p; | |
3719 | ||
3720 | /* Do not allow mem (plus (reg, const)) if vector mode. */ | |
3721 | if (aarch64_vector_mode_p (mode) | |
3722 | && GET_CODE (x) == PLUS | |
3723 | && REG_P (XEXP (x, 0)) | |
3724 | && CONST_INT_P (XEXP (x, 1))) | |
3725 | { | |
3726 | rtx orig_rtx = x; | |
3727 | x = copy_rtx (x); | |
3728 | push_reload (orig_rtx, NULL_RTX, x_p, NULL, | |
3729 | BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, | |
3730 | opnum, (enum reload_type) type); | |
3731 | return x; | |
3732 | } | |
3733 | ||
3734 | /* We must recognize output that we have already generated ourselves. */ | |
3735 | if (GET_CODE (x) == PLUS | |
3736 | && GET_CODE (XEXP (x, 0)) == PLUS | |
3737 | && REG_P (XEXP (XEXP (x, 0), 0)) | |
3738 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) | |
3739 | && CONST_INT_P (XEXP (x, 1))) | |
3740 | { | |
3741 | push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, | |
3742 | BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, | |
3743 | opnum, (enum reload_type) type); | |
3744 | return x; | |
3745 | } | |
3746 | ||
3747 | /* We wish to handle large displacements off a base register by splitting | |
3748 | the addend across an add and the mem insn. This can cut the number of | |
3749 | extra insns needed from 3 to 1. It is only useful for load/store of a | |
3750 | single register with 12 bit offset field. */ | |
3751 | if (GET_CODE (x) == PLUS | |
3752 | && REG_P (XEXP (x, 0)) | |
3753 | && CONST_INT_P (XEXP (x, 1)) | |
3754 | && HARD_REGISTER_P (XEXP (x, 0)) | |
3755 | && mode != TImode | |
3756 | && mode != TFmode | |
3757 | && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true)) | |
3758 | { | |
3759 | HOST_WIDE_INT val = INTVAL (XEXP (x, 1)); | |
3760 | HOST_WIDE_INT low = val & 0xfff; | |
3761 | HOST_WIDE_INT high = val - low; | |
3762 | HOST_WIDE_INT offs; | |
3763 | rtx cst; | |
3764 | ||
3765 | /* Reload non-zero BLKmode offsets. This is because we cannot ascertain | |
3766 | BLKmode alignment. */ | |
3767 | if (GET_MODE_SIZE (mode) == 0) | |
3768 | return NULL_RTX; | |
3769 | ||
3770 | offs = low % GET_MODE_SIZE (mode); | |
3771 | ||
3772 | /* Align misaligned offset by adjusting high part to compensate. */ | |
3773 | if (offs != 0) | |
3774 | { | |
3775 | if (aarch64_uimm12_shift (high + offs)) | |
3776 | { | |
3777 | /* Align down. */ | |
3778 | low = low - offs; | |
3779 | high = high + offs; | |
3780 | } | |
3781 | else | |
3782 | { | |
3783 | /* Align up. */ | |
3784 | offs = GET_MODE_SIZE (mode) - offs; | |
3785 | low = low + offs; | |
3786 | high = high + (low & 0x1000) - offs; | |
3787 | low &= 0xfff; | |
3788 | } | |
3789 | } | |
3790 | ||
3791 | /* Check for overflow. */ | |
3792 | if (high + low != val) | |
3793 | return NULL_RTX; | |
3794 | ||
3795 | cst = GEN_INT (high); | |
3796 | if (!aarch64_uimm12_shift (high)) | |
3797 | cst = force_const_mem (Pmode, cst); | |
3798 | ||
3799 | /* Reload high part into base reg, leaving the low part | |
3800 | in the mem instruction. */ | |
3801 | x = gen_rtx_PLUS (Pmode, | |
3802 | gen_rtx_PLUS (Pmode, XEXP (x, 0), cst), | |
3803 | GEN_INT (low)); | |
3804 | ||
3805 | push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, | |
3806 | BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, | |
3807 | opnum, (enum reload_type) type); | |
3808 | return x; | |
3809 | } | |
3810 | ||
3811 | return NULL_RTX; | |
3812 | } | |
3813 | ||
3814 | ||
3815 | static reg_class_t | |
3816 | aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, | |
3817 | reg_class_t rclass, | |
3818 | enum machine_mode mode, | |
3819 | secondary_reload_info *sri) | |
3820 | { | |
3821 | /* Address expressions of the form PLUS (SP, large_offset) need two | |
3822 | scratch registers, one for the constant, and one for holding a | |
3823 | copy of SP, since SP cannot be used on the RHS of an add-reg | |
3824 | instruction. */ | |
3825 | if (mode == DImode | |
3826 | && GET_CODE (x) == PLUS | |
3827 | && XEXP (x, 0) == stack_pointer_rtx | |
3828 | && CONST_INT_P (XEXP (x, 1)) | |
3829 | && !aarch64_uimm12_shift (INTVAL (XEXP (x, 1)))) | |
3830 | { | |
3831 | sri->icode = CODE_FOR_reload_sp_immediate; | |
3832 | return NO_REGS; | |
3833 | } | |
3834 | ||
3835 | /* Without the TARGET_SIMD instructions we cannot move a Q register | |
3836 | to a Q register directly. We need a scratch. */ | |
3837 | if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x) | |
3838 | && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD | |
3839 | && reg_class_subset_p (rclass, FP_REGS)) | |
3840 | { | |
3841 | if (mode == TFmode) | |
3842 | sri->icode = CODE_FOR_aarch64_reload_movtf; | |
3843 | else if (mode == TImode) | |
3844 | sri->icode = CODE_FOR_aarch64_reload_movti; | |
3845 | return NO_REGS; | |
3846 | } | |
3847 | ||
3848 | /* A TFmode or TImode memory access should be handled via an FP_REGS | |
3849 | because AArch64 has richer addressing modes for LDR/STR instructions | |
3850 | than LDP/STP instructions. */ | |
3851 | if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS | |
3852 | && GET_MODE_SIZE (mode) == 16 && MEM_P (x)) | |
3853 | return FP_REGS; | |
3854 | ||
3855 | if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x)) | |
3856 | return CORE_REGS; | |
3857 | ||
3858 | return NO_REGS; | |
3859 | } | |
3860 | ||
3861 | static bool | |
3862 | aarch64_can_eliminate (const int from, const int to) | |
3863 | { | |
3864 | /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into | |
3865 | HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */ | |
3866 | ||
3867 | if (frame_pointer_needed) | |
3868 | { | |
3869 | if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) | |
3870 | return true; | |
3871 | if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) | |
3872 | return false; | |
3873 | if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM | |
3874 | && !cfun->calls_alloca) | |
3875 | return true; | |
3876 | if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) | |
3877 | return true; | |
3878 | return false; | |
3879 | } | |
3880 | else | |
3881 | { | |
3882 | /* If we decided that we didn't need a frame pointer but then used | |
3883 | LR in the function, then we do need a frame pointer after all, so | |
3884 | prevent this elimination to ensure a frame pointer is used. */ | |
3885 | ||
3886 | if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM | |
3887 | && df_regs_ever_live_p (LR_REGNUM)) | |
3888 | return false; | |
3889 | } | |
3890 | return true; | |
3891 | } | |
3892 | ||
3893 | HOST_WIDE_INT | |
3894 | aarch64_initial_elimination_offset (unsigned from, unsigned to) | |
3895 | { | |
3896 | HOST_WIDE_INT frame_size; | |
3897 | HOST_WIDE_INT offset; | |
3898 | ||
3899 | aarch64_layout_frame (); | |
3900 | frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size | |
3901 | + crtl->outgoing_args_size | |
3902 | + cfun->machine->saved_varargs_size); | |
3903 | ||
3904 | frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT); | |
3905 | offset = frame_size; | |
3906 | ||
3907 | if (to == HARD_FRAME_POINTER_REGNUM) | |
3908 | { | |
3909 | if (from == ARG_POINTER_REGNUM) | |
3910 | return offset - crtl->outgoing_args_size; | |
3911 | ||
3912 | if (from == FRAME_POINTER_REGNUM) | |
3913 | return cfun->machine->frame.saved_regs_size; | |
3914 | } | |
3915 | ||
3916 | if (to == STACK_POINTER_REGNUM) | |
3917 | { | |
3918 | if (from == FRAME_POINTER_REGNUM) | |
3919 | { | |
3920 | HOST_WIDE_INT elim = crtl->outgoing_args_size | |
3921 | + cfun->machine->frame.saved_regs_size | |
3922 | - cfun->machine->frame.fp_lr_offset; | |
3923 | elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT); | |
3924 | return elim; | |
3925 | } | |
3926 | } | |
3927 | ||
3928 | return offset; | |
3929 | } | |
3930 | ||
3931 | ||
3932 | /* Implement RETURN_ADDR_RTX. We do not support moving back to a | |
3933 | previous frame. */ | |
3934 | ||
3935 | rtx | |
3936 | aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) | |
3937 | { | |
3938 | if (count != 0) | |
3939 | return const0_rtx; | |
3940 | return get_hard_reg_initial_val (Pmode, LR_REGNUM); | |
3941 | } | |
3942 | ||
3943 | ||
3944 | static void | |
3945 | aarch64_asm_trampoline_template (FILE *f) | |
3946 | { | |
01a3a324 N |
3947 | asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]); |
3948 | asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]); | |
3949 | asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]); | |
43e9d192 IB |
3950 | assemble_aligned_integer (4, const0_rtx); |
3951 | assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); | |
3952 | assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); | |
3953 | } | |
3954 | ||
3955 | unsigned | |
3956 | aarch64_trampoline_size (void) | |
3957 | { | |
3958 | return 32; /* 3 insns + padding + 2 dwords. */ | |
3959 | } | |
3960 | ||
3961 | static void | |
3962 | aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) | |
3963 | { | |
3964 | rtx fnaddr, mem, a_tramp; | |
3965 | ||
3966 | /* Don't need to copy the trailing D-words, we fill those in below. */ | |
3967 | emit_block_move (m_tramp, assemble_trampoline_template (), | |
3968 | GEN_INT (TRAMPOLINE_SIZE - 16), BLOCK_OP_NORMAL); | |
3969 | mem = adjust_address (m_tramp, DImode, 16); | |
3970 | fnaddr = XEXP (DECL_RTL (fndecl), 0); | |
3971 | emit_move_insn (mem, fnaddr); | |
3972 | ||
3973 | mem = adjust_address (m_tramp, DImode, 24); | |
3974 | emit_move_insn (mem, chain_value); | |
3975 | ||
3976 | /* XXX We should really define a "clear_cache" pattern and use | |
3977 | gen_clear_cache(). */ | |
3978 | a_tramp = XEXP (m_tramp, 0); | |
3979 | emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), | |
3980 | LCT_NORMAL, VOIDmode, 2, a_tramp, Pmode, | |
3981 | plus_constant (Pmode, a_tramp, TRAMPOLINE_SIZE), Pmode); | |
3982 | } | |
3983 | ||
3984 | static unsigned char | |
3985 | aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode) | |
3986 | { | |
3987 | switch (regclass) | |
3988 | { | |
3989 | case CORE_REGS: | |
3990 | case POINTER_REGS: | |
3991 | case GENERAL_REGS: | |
3992 | case ALL_REGS: | |
3993 | case FP_REGS: | |
3994 | case FP_LO_REGS: | |
3995 | return | |
3996 | aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 : | |
3997 | (GET_MODE_SIZE (mode) + 7) / 8; | |
3998 | case STACK_REG: | |
3999 | return 1; | |
4000 | ||
4001 | case NO_REGS: | |
4002 | return 0; | |
4003 | ||
4004 | default: | |
4005 | break; | |
4006 | } | |
4007 | gcc_unreachable (); | |
4008 | } | |
4009 | ||
4010 | static reg_class_t | |
4011 | aarch64_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t regclass) | |
4012 | { | |
4013 | return ((regclass == POINTER_REGS || regclass == STACK_REG) | |
4014 | ? GENERAL_REGS : regclass); | |
4015 | } | |
4016 | ||
4017 | void | |
4018 | aarch64_asm_output_labelref (FILE* f, const char *name) | |
4019 | { | |
4020 | asm_fprintf (f, "%U%s", name); | |
4021 | } | |
4022 | ||
4023 | static void | |
4024 | aarch64_elf_asm_constructor (rtx symbol, int priority) | |
4025 | { | |
4026 | if (priority == DEFAULT_INIT_PRIORITY) | |
4027 | default_ctor_section_asm_out_constructor (symbol, priority); | |
4028 | else | |
4029 | { | |
4030 | section *s; | |
4031 | char buf[18]; | |
4032 | snprintf (buf, sizeof (buf), ".init_array.%.5u", priority); | |
4033 | s = get_section (buf, SECTION_WRITE, NULL); | |
4034 | switch_to_section (s); | |
4035 | assemble_align (POINTER_SIZE); | |
4036 | fputs ("\t.dword\t", asm_out_file); | |
4037 | output_addr_const (asm_out_file, symbol); | |
4038 | fputc ('\n', asm_out_file); | |
4039 | } | |
4040 | } | |
4041 | ||
4042 | static void | |
4043 | aarch64_elf_asm_destructor (rtx symbol, int priority) | |
4044 | { | |
4045 | if (priority == DEFAULT_INIT_PRIORITY) | |
4046 | default_dtor_section_asm_out_destructor (symbol, priority); | |
4047 | else | |
4048 | { | |
4049 | section *s; | |
4050 | char buf[18]; | |
4051 | snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority); | |
4052 | s = get_section (buf, SECTION_WRITE, NULL); | |
4053 | switch_to_section (s); | |
4054 | assemble_align (POINTER_SIZE); | |
4055 | fputs ("\t.dword\t", asm_out_file); | |
4056 | output_addr_const (asm_out_file, symbol); | |
4057 | fputc ('\n', asm_out_file); | |
4058 | } | |
4059 | } | |
4060 | ||
4061 | const char* | |
4062 | aarch64_output_casesi (rtx *operands) | |
4063 | { | |
4064 | char buf[100]; | |
4065 | char label[100]; | |
4066 | rtx diff_vec = PATTERN (next_real_insn (operands[2])); | |
4067 | int index; | |
4068 | static const char *const patterns[4][2] = | |
4069 | { | |
4070 | { | |
4071 | "ldrb\t%w3, [%0,%w1,uxtw]", | |
4072 | "add\t%3, %4, %w3, sxtb #2" | |
4073 | }, | |
4074 | { | |
4075 | "ldrh\t%w3, [%0,%w1,uxtw #1]", | |
4076 | "add\t%3, %4, %w3, sxth #2" | |
4077 | }, | |
4078 | { | |
4079 | "ldr\t%w3, [%0,%w1,uxtw #2]", | |
4080 | "add\t%3, %4, %w3, sxtw #2" | |
4081 | }, | |
4082 | /* We assume that DImode is only generated when not optimizing and | |
4083 | that we don't really need 64-bit address offsets. That would | |
4084 | imply an object file with 8GB of code in a single function! */ | |
4085 | { | |
4086 | "ldr\t%w3, [%0,%w1,uxtw #2]", | |
4087 | "add\t%3, %4, %w3, sxtw #2" | |
4088 | } | |
4089 | }; | |
4090 | ||
4091 | gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); | |
4092 | ||
4093 | index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec))); | |
4094 | ||
4095 | gcc_assert (index >= 0 && index <= 3); | |
4096 | ||
4097 | /* Need to implement table size reduction, by chaning the code below. */ | |
4098 | output_asm_insn (patterns[index][0], operands); | |
4099 | ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2])); | |
4100 | snprintf (buf, sizeof (buf), | |
4101 | "adr\t%%4, %s", targetm.strip_name_encoding (label)); | |
4102 | output_asm_insn (buf, operands); | |
4103 | output_asm_insn (patterns[index][1], operands); | |
4104 | output_asm_insn ("br\t%3", operands); | |
4105 | assemble_label (asm_out_file, label); | |
4106 | return ""; | |
4107 | } | |
4108 | ||
4109 | ||
4110 | /* Return size in bits of an arithmetic operand which is shifted/scaled and | |
4111 | masked such that it is suitable for a UXTB, UXTH, or UXTW extend | |
4112 | operator. */ | |
4113 | ||
4114 | int | |
4115 | aarch64_uxt_size (int shift, HOST_WIDE_INT mask) | |
4116 | { | |
4117 | if (shift >= 0 && shift <= 3) | |
4118 | { | |
4119 | int size; | |
4120 | for (size = 8; size <= 32; size *= 2) | |
4121 | { | |
4122 | HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1; | |
4123 | if (mask == bits << shift) | |
4124 | return size; | |
4125 | } | |
4126 | } | |
4127 | return 0; | |
4128 | } | |
4129 | ||
4130 | static bool | |
4131 | aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, | |
4132 | const_rtx x ATTRIBUTE_UNUSED) | |
4133 | { | |
4134 | /* We can't use blocks for constants when we're using a per-function | |
4135 | constant pool. */ | |
4136 | return false; | |
4137 | } | |
4138 | ||
4139 | static section * | |
4140 | aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED, | |
4141 | rtx x ATTRIBUTE_UNUSED, | |
4142 | unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) | |
4143 | { | |
4144 | /* Force all constant pool entries into the current function section. */ | |
4145 | return function_section (current_function_decl); | |
4146 | } | |
4147 | ||
4148 | ||
4149 | /* Costs. */ | |
4150 | ||
4151 | /* Helper function for rtx cost calculation. Strip a shift expression | |
4152 | from X. Returns the inner operand if successful, or the original | |
4153 | expression on failure. */ | |
4154 | static rtx | |
4155 | aarch64_strip_shift (rtx x) | |
4156 | { | |
4157 | rtx op = x; | |
4158 | ||
4159 | if ((GET_CODE (op) == ASHIFT | |
4160 | || GET_CODE (op) == ASHIFTRT | |
4161 | || GET_CODE (op) == LSHIFTRT) | |
4162 | && CONST_INT_P (XEXP (op, 1))) | |
4163 | return XEXP (op, 0); | |
4164 | ||
4165 | if (GET_CODE (op) == MULT | |
4166 | && CONST_INT_P (XEXP (op, 1)) | |
4167 | && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64) | |
4168 | return XEXP (op, 0); | |
4169 | ||
4170 | return x; | |
4171 | } | |
4172 | ||
4173 | /* Helper function for rtx cost calculation. Strip a shift or extend | |
4174 | expression from X. Returns the inner operand if successful, or the | |
4175 | original expression on failure. We deal with a number of possible | |
4176 | canonicalization variations here. */ | |
4177 | static rtx | |
4178 | aarch64_strip_shift_or_extend (rtx x) | |
4179 | { | |
4180 | rtx op = x; | |
4181 | ||
4182 | /* Zero and sign extraction of a widened value. */ | |
4183 | if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT) | |
4184 | && XEXP (op, 2) == const0_rtx | |
4185 | && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1), | |
4186 | XEXP (op, 1))) | |
4187 | return XEXP (XEXP (op, 0), 0); | |
4188 | ||
4189 | /* It can also be represented (for zero-extend) as an AND with an | |
4190 | immediate. */ | |
4191 | if (GET_CODE (op) == AND | |
4192 | && GET_CODE (XEXP (op, 0)) == MULT | |
4193 | && CONST_INT_P (XEXP (XEXP (op, 0), 1)) | |
4194 | && CONST_INT_P (XEXP (op, 1)) | |
4195 | && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))), | |
4196 | INTVAL (XEXP (op, 1))) != 0) | |
4197 | return XEXP (XEXP (op, 0), 0); | |
4198 | ||
4199 | /* Now handle extended register, as this may also have an optional | |
4200 | left shift by 1..4. */ | |
4201 | if (GET_CODE (op) == ASHIFT | |
4202 | && CONST_INT_P (XEXP (op, 1)) | |
4203 | && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4) | |
4204 | op = XEXP (op, 0); | |
4205 | ||
4206 | if (GET_CODE (op) == ZERO_EXTEND | |
4207 | || GET_CODE (op) == SIGN_EXTEND) | |
4208 | op = XEXP (op, 0); | |
4209 | ||
4210 | if (op != x) | |
4211 | return op; | |
4212 | ||
4213 | return aarch64_strip_shift (x); | |
4214 | } | |
4215 | ||
4216 | /* Calculate the cost of calculating X, storing it in *COST. Result | |
4217 | is true if the total cost of the operation has now been calculated. */ | |
4218 | static bool | |
4219 | aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED, | |
4220 | int param ATTRIBUTE_UNUSED, int *cost, bool speed) | |
4221 | { | |
4222 | rtx op0, op1; | |
4223 | const struct cpu_rtx_cost_table *extra_cost | |
4224 | = aarch64_tune_params->insn_extra_cost; | |
4225 | ||
4226 | switch (code) | |
4227 | { | |
4228 | case SET: | |
4229 | op0 = SET_DEST (x); | |
4230 | op1 = SET_SRC (x); | |
4231 | ||
4232 | switch (GET_CODE (op0)) | |
4233 | { | |
4234 | case MEM: | |
4235 | if (speed) | |
4236 | *cost += extra_cost->memory_store; | |
4237 | ||
4238 | if (op1 != const0_rtx) | |
4239 | *cost += rtx_cost (op1, SET, 1, speed); | |
4240 | return true; | |
4241 | ||
4242 | case SUBREG: | |
4243 | if (! REG_P (SUBREG_REG (op0))) | |
4244 | *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed); | |
4245 | /* Fall through. */ | |
4246 | case REG: | |
4247 | /* Cost is just the cost of the RHS of the set. */ | |
4248 | *cost += rtx_cost (op1, SET, 1, true); | |
4249 | return true; | |
4250 | ||
4251 | case ZERO_EXTRACT: /* Bit-field insertion. */ | |
4252 | case SIGN_EXTRACT: | |
4253 | /* Strip any redundant widening of the RHS to meet the width of | |
4254 | the target. */ | |
4255 | if (GET_CODE (op1) == SUBREG) | |
4256 | op1 = SUBREG_REG (op1); | |
4257 | if ((GET_CODE (op1) == ZERO_EXTEND | |
4258 | || GET_CODE (op1) == SIGN_EXTEND) | |
4259 | && GET_CODE (XEXP (op0, 1)) == CONST_INT | |
4260 | && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0))) | |
4261 | >= INTVAL (XEXP (op0, 1)))) | |
4262 | op1 = XEXP (op1, 0); | |
4263 | *cost += rtx_cost (op1, SET, 1, speed); | |
4264 | return true; | |
4265 | ||
4266 | default: | |
4267 | break; | |
4268 | } | |
4269 | return false; | |
4270 | ||
4271 | case MEM: | |
4272 | if (speed) | |
4273 | *cost += extra_cost->memory_load; | |
4274 | ||
4275 | return true; | |
4276 | ||
4277 | case NEG: | |
4278 | op0 = CONST0_RTX (GET_MODE (x)); | |
4279 | op1 = XEXP (x, 0); | |
4280 | goto cost_minus; | |
4281 | ||
4282 | case COMPARE: | |
4283 | op0 = XEXP (x, 0); | |
4284 | op1 = XEXP (x, 1); | |
4285 | ||
4286 | if (op1 == const0_rtx | |
4287 | && GET_CODE (op0) == AND) | |
4288 | { | |
4289 | x = op0; | |
4290 | goto cost_logic; | |
4291 | } | |
4292 | ||
4293 | /* Comparisons can work if the order is swapped. | |
4294 | Canonicalization puts the more complex operation first, but | |
4295 | we want it in op1. */ | |
4296 | if (! (REG_P (op0) | |
4297 | || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0))))) | |
4298 | { | |
4299 | op0 = XEXP (x, 1); | |
4300 | op1 = XEXP (x, 0); | |
4301 | } | |
4302 | goto cost_minus; | |
4303 | ||
4304 | case MINUS: | |
4305 | op0 = XEXP (x, 0); | |
4306 | op1 = XEXP (x, 1); | |
4307 | ||
4308 | cost_minus: | |
4309 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT | |
4310 | || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC | |
4311 | && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)) | |
4312 | { | |
4313 | if (op0 != const0_rtx) | |
4314 | *cost += rtx_cost (op0, MINUS, 0, speed); | |
4315 | ||
4316 | if (CONST_INT_P (op1)) | |
4317 | { | |
4318 | if (!aarch64_uimm12_shift (INTVAL (op1))) | |
4319 | *cost += rtx_cost (op1, MINUS, 1, speed); | |
4320 | } | |
4321 | else | |
4322 | { | |
4323 | op1 = aarch64_strip_shift_or_extend (op1); | |
4324 | *cost += rtx_cost (op1, MINUS, 1, speed); | |
4325 | } | |
4326 | return true; | |
4327 | } | |
4328 | ||
4329 | return false; | |
4330 | ||
4331 | case PLUS: | |
4332 | op0 = XEXP (x, 0); | |
4333 | op1 = XEXP (x, 1); | |
4334 | ||
4335 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) | |
4336 | { | |
4337 | if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1))) | |
4338 | { | |
4339 | *cost += rtx_cost (op0, PLUS, 0, speed); | |
4340 | } | |
4341 | else | |
4342 | { | |
4343 | rtx new_op0 = aarch64_strip_shift_or_extend (op0); | |
4344 | ||
4345 | if (new_op0 == op0 | |
4346 | && GET_CODE (op0) == MULT) | |
4347 | { | |
4348 | if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND | |
4349 | && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND) | |
4350 | || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND | |
4351 | && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND)) | |
4352 | { | |
4353 | *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0, | |
4354 | speed) | |
4355 | + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1, | |
4356 | speed) | |
4357 | + rtx_cost (op1, PLUS, 1, speed)); | |
4358 | if (speed) | |
4359 | *cost += extra_cost->int_multiply_extend_add; | |
4360 | return true; | |
4361 | } | |
4362 | *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed) | |
4363 | + rtx_cost (XEXP (op0, 1), MULT, 1, speed) | |
4364 | + rtx_cost (op1, PLUS, 1, speed)); | |
4365 | ||
4366 | if (speed) | |
4367 | *cost += extra_cost->int_multiply_add; | |
4368 | } | |
4369 | ||
4370 | *cost += (rtx_cost (new_op0, PLUS, 0, speed) | |
4371 | + rtx_cost (op1, PLUS, 1, speed)); | |
4372 | } | |
4373 | return true; | |
4374 | } | |
4375 | ||
4376 | return false; | |
4377 | ||
4378 | case IOR: | |
4379 | case XOR: | |
4380 | case AND: | |
4381 | cost_logic: | |
4382 | op0 = XEXP (x, 0); | |
4383 | op1 = XEXP (x, 1); | |
4384 | ||
4385 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) | |
4386 | { | |
4387 | if (CONST_INT_P (op1) | |
4388 | && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x))) | |
4389 | { | |
4390 | *cost += rtx_cost (op0, AND, 0, speed); | |
4391 | } | |
4392 | else | |
4393 | { | |
4394 | if (GET_CODE (op0) == NOT) | |
4395 | op0 = XEXP (op0, 0); | |
4396 | op0 = aarch64_strip_shift (op0); | |
4397 | *cost += (rtx_cost (op0, AND, 0, speed) | |
4398 | + rtx_cost (op1, AND, 1, speed)); | |
4399 | } | |
4400 | return true; | |
4401 | } | |
4402 | return false; | |
4403 | ||
4404 | case ZERO_EXTEND: | |
4405 | if ((GET_MODE (x) == DImode | |
4406 | && GET_MODE (XEXP (x, 0)) == SImode) | |
4407 | || GET_CODE (XEXP (x, 0)) == MEM) | |
4408 | { | |
4409 | *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed); | |
4410 | return true; | |
4411 | } | |
4412 | return false; | |
4413 | ||
4414 | case SIGN_EXTEND: | |
4415 | if (GET_CODE (XEXP (x, 0)) == MEM) | |
4416 | { | |
4417 | *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed); | |
4418 | return true; | |
4419 | } | |
4420 | return false; | |
4421 | ||
4422 | case ROTATE: | |
4423 | if (!CONST_INT_P (XEXP (x, 1))) | |
4424 | *cost += COSTS_N_INSNS (2); | |
4425 | /* Fall through. */ | |
4426 | case ROTATERT: | |
4427 | case LSHIFTRT: | |
4428 | case ASHIFT: | |
4429 | case ASHIFTRT: | |
4430 | ||
4431 | /* Shifting by a register often takes an extra cycle. */ | |
4432 | if (speed && !CONST_INT_P (XEXP (x, 1))) | |
4433 | *cost += extra_cost->register_shift; | |
4434 | ||
4435 | *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed); | |
4436 | return true; | |
4437 | ||
4438 | case HIGH: | |
4439 | if (!CONSTANT_P (XEXP (x, 0))) | |
4440 | *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed); | |
4441 | return true; | |
4442 | ||
4443 | case LO_SUM: | |
4444 | if (!CONSTANT_P (XEXP (x, 1))) | |
4445 | *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed); | |
4446 | *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed); | |
4447 | return true; | |
4448 | ||
4449 | case ZERO_EXTRACT: | |
4450 | case SIGN_EXTRACT: | |
4451 | *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed); | |
4452 | return true; | |
4453 | ||
4454 | case MULT: | |
4455 | op0 = XEXP (x, 0); | |
4456 | op1 = XEXP (x, 1); | |
4457 | ||
4458 | *cost = COSTS_N_INSNS (1); | |
4459 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) | |
4460 | { | |
4461 | if (CONST_INT_P (op1) | |
4462 | && exact_log2 (INTVAL (op1)) > 0) | |
4463 | { | |
4464 | *cost += rtx_cost (op0, ASHIFT, 0, speed); | |
4465 | return true; | |
4466 | } | |
4467 | ||
4468 | if ((GET_CODE (op0) == ZERO_EXTEND | |
4469 | && GET_CODE (op1) == ZERO_EXTEND) | |
4470 | || (GET_CODE (op0) == SIGN_EXTEND | |
4471 | && GET_CODE (op1) == SIGN_EXTEND)) | |
4472 | { | |
4473 | *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed) | |
4474 | + rtx_cost (XEXP (op1, 0), MULT, 1, speed)); | |
4475 | if (speed) | |
4476 | *cost += extra_cost->int_multiply_extend; | |
4477 | return true; | |
4478 | } | |
4479 | ||
4480 | if (speed) | |
4481 | *cost += extra_cost->int_multiply; | |
4482 | } | |
4483 | else if (speed) | |
4484 | { | |
4485 | if (GET_MODE (x) == DFmode) | |
4486 | *cost += extra_cost->double_multiply; | |
4487 | else if (GET_MODE (x) == SFmode) | |
4488 | *cost += extra_cost->float_multiply; | |
4489 | } | |
4490 | ||
4491 | return false; /* All arguments need to be in registers. */ | |
4492 | ||
4493 | case MOD: | |
4494 | case UMOD: | |
4495 | *cost = COSTS_N_INSNS (2); | |
4496 | if (speed) | |
4497 | { | |
4498 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) | |
4499 | *cost += (extra_cost->int_multiply_add | |
4500 | + extra_cost->int_divide); | |
4501 | else if (GET_MODE (x) == DFmode) | |
4502 | *cost += (extra_cost->double_multiply | |
4503 | + extra_cost->double_divide); | |
4504 | else if (GET_MODE (x) == SFmode) | |
4505 | *cost += (extra_cost->float_multiply | |
4506 | + extra_cost->float_divide); | |
4507 | } | |
4508 | return false; /* All arguments need to be in registers. */ | |
4509 | ||
4510 | case DIV: | |
4511 | case UDIV: | |
4512 | *cost = COSTS_N_INSNS (1); | |
4513 | if (speed) | |
4514 | { | |
4515 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) | |
4516 | *cost += extra_cost->int_divide; | |
4517 | else if (GET_MODE (x) == DFmode) | |
4518 | *cost += extra_cost->double_divide; | |
4519 | else if (GET_MODE (x) == SFmode) | |
4520 | *cost += extra_cost->float_divide; | |
4521 | } | |
4522 | return false; /* All arguments need to be in registers. */ | |
4523 | ||
4524 | default: | |
4525 | break; | |
4526 | } | |
4527 | return false; | |
4528 | } | |
4529 | ||
4530 | static int | |
4531 | aarch64_address_cost (rtx x ATTRIBUTE_UNUSED, | |
4532 | enum machine_mode mode ATTRIBUTE_UNUSED, | |
4533 | addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED) | |
4534 | { | |
4535 | enum rtx_code c = GET_CODE (x); | |
4536 | const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost; | |
4537 | ||
4538 | if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY) | |
4539 | return addr_cost->pre_modify; | |
4540 | ||
4541 | if (c == POST_INC || c == POST_DEC || c == POST_MODIFY) | |
4542 | return addr_cost->post_modify; | |
4543 | ||
4544 | if (c == PLUS) | |
4545 | { | |
4546 | if (GET_CODE (XEXP (x, 1)) == CONST_INT) | |
4547 | return addr_cost->imm_offset; | |
4548 | else if (GET_CODE (XEXP (x, 0)) == MULT | |
4549 | || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND | |
4550 | || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) | |
4551 | return addr_cost->register_extend; | |
4552 | ||
4553 | return addr_cost->register_offset; | |
4554 | } | |
4555 | else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF) | |
4556 | return addr_cost->imm_offset; | |
4557 | ||
4558 | return 0; | |
4559 | } | |
4560 | ||
4561 | static int | |
4562 | aarch64_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, | |
4563 | reg_class_t from, reg_class_t to) | |
4564 | { | |
4565 | const struct cpu_regmove_cost *regmove_cost | |
4566 | = aarch64_tune_params->regmove_cost; | |
4567 | ||
4568 | if (from == GENERAL_REGS && to == GENERAL_REGS) | |
4569 | return regmove_cost->GP2GP; | |
4570 | else if (from == GENERAL_REGS) | |
4571 | return regmove_cost->GP2FP; | |
4572 | else if (to == GENERAL_REGS) | |
4573 | return regmove_cost->FP2GP; | |
4574 | ||
4575 | /* When AdvSIMD instructions are disabled it is not possible to move | |
4576 | a 128-bit value directly between Q registers. This is handled in | |
4577 | secondary reload. A general register is used as a scratch to move | |
4578 | the upper DI value and the lower DI value is moved directly, | |
4579 | hence the cost is the sum of three moves. */ | |
4580 | ||
4581 | if (! TARGET_SIMD && GET_MODE_SIZE (from) == 128 && GET_MODE_SIZE (to) == 128) | |
4582 | return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP; | |
4583 | ||
4584 | return regmove_cost->FP2FP; | |
4585 | } | |
4586 | ||
4587 | static int | |
4588 | aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, | |
4589 | reg_class_t rclass ATTRIBUTE_UNUSED, | |
4590 | bool in ATTRIBUTE_UNUSED) | |
4591 | { | |
4592 | return aarch64_tune_params->memmov_cost; | |
4593 | } | |
4594 | ||
4595 | static void initialize_aarch64_code_model (void); | |
4596 | ||
4597 | /* Parse the architecture extension string. */ | |
4598 | ||
4599 | static void | |
4600 | aarch64_parse_extension (char *str) | |
4601 | { | |
4602 | /* The extension string is parsed left to right. */ | |
4603 | const struct aarch64_option_extension *opt = NULL; | |
4604 | ||
4605 | /* Flag to say whether we are adding or removing an extension. */ | |
4606 | int adding_ext = -1; | |
4607 | ||
4608 | while (str != NULL && *str != 0) | |
4609 | { | |
4610 | char *ext; | |
4611 | size_t len; | |
4612 | ||
4613 | str++; | |
4614 | ext = strchr (str, '+'); | |
4615 | ||
4616 | if (ext != NULL) | |
4617 | len = ext - str; | |
4618 | else | |
4619 | len = strlen (str); | |
4620 | ||
4621 | if (len >= 2 && strncmp (str, "no", 2) == 0) | |
4622 | { | |
4623 | adding_ext = 0; | |
4624 | len -= 2; | |
4625 | str += 2; | |
4626 | } | |
4627 | else if (len > 0) | |
4628 | adding_ext = 1; | |
4629 | ||
4630 | if (len == 0) | |
4631 | { | |
4632 | error ("missing feature modifier after %qs", "+no"); | |
4633 | return; | |
4634 | } | |
4635 | ||
4636 | /* Scan over the extensions table trying to find an exact match. */ | |
4637 | for (opt = all_extensions; opt->name != NULL; opt++) | |
4638 | { | |
4639 | if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0) | |
4640 | { | |
4641 | /* Add or remove the extension. */ | |
4642 | if (adding_ext) | |
4643 | aarch64_isa_flags |= opt->flags_on; | |
4644 | else | |
4645 | aarch64_isa_flags &= ~(opt->flags_off); | |
4646 | break; | |
4647 | } | |
4648 | } | |
4649 | ||
4650 | if (opt->name == NULL) | |
4651 | { | |
4652 | /* Extension not found in list. */ | |
4653 | error ("unknown feature modifier %qs", str); | |
4654 | return; | |
4655 | } | |
4656 | ||
4657 | str = ext; | |
4658 | }; | |
4659 | ||
4660 | return; | |
4661 | } | |
4662 | ||
4663 | /* Parse the ARCH string. */ | |
4664 | ||
4665 | static void | |
4666 | aarch64_parse_arch (void) | |
4667 | { | |
4668 | char *ext; | |
4669 | const struct processor *arch; | |
4670 | char *str = (char *) alloca (strlen (aarch64_arch_string) + 1); | |
4671 | size_t len; | |
4672 | ||
4673 | strcpy (str, aarch64_arch_string); | |
4674 | ||
4675 | ext = strchr (str, '+'); | |
4676 | ||
4677 | if (ext != NULL) | |
4678 | len = ext - str; | |
4679 | else | |
4680 | len = strlen (str); | |
4681 | ||
4682 | if (len == 0) | |
4683 | { | |
4684 | error ("missing arch name in -march=%qs", str); | |
4685 | return; | |
4686 | } | |
4687 | ||
4688 | /* Loop through the list of supported ARCHs to find a match. */ | |
4689 | for (arch = all_architectures; arch->name != NULL; arch++) | |
4690 | { | |
4691 | if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0) | |
4692 | { | |
4693 | selected_arch = arch; | |
4694 | aarch64_isa_flags = selected_arch->flags; | |
4695 | selected_cpu = &all_cores[selected_arch->core]; | |
4696 | ||
4697 | if (ext != NULL) | |
4698 | { | |
4699 | /* ARCH string contains at least one extension. */ | |
4700 | aarch64_parse_extension (ext); | |
4701 | } | |
4702 | ||
4703 | return; | |
4704 | } | |
4705 | } | |
4706 | ||
4707 | /* ARCH name not found in list. */ | |
4708 | error ("unknown value %qs for -march", str); | |
4709 | return; | |
4710 | } | |
4711 | ||
4712 | /* Parse the CPU string. */ | |
4713 | ||
4714 | static void | |
4715 | aarch64_parse_cpu (void) | |
4716 | { | |
4717 | char *ext; | |
4718 | const struct processor *cpu; | |
4719 | char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1); | |
4720 | size_t len; | |
4721 | ||
4722 | strcpy (str, aarch64_cpu_string); | |
4723 | ||
4724 | ext = strchr (str, '+'); | |
4725 | ||
4726 | if (ext != NULL) | |
4727 | len = ext - str; | |
4728 | else | |
4729 | len = strlen (str); | |
4730 | ||
4731 | if (len == 0) | |
4732 | { | |
4733 | error ("missing cpu name in -mcpu=%qs", str); | |
4734 | return; | |
4735 | } | |
4736 | ||
4737 | /* Loop through the list of supported CPUs to find a match. */ | |
4738 | for (cpu = all_cores; cpu->name != NULL; cpu++) | |
4739 | { | |
4740 | if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0) | |
4741 | { | |
4742 | selected_cpu = cpu; | |
4743 | aarch64_isa_flags = selected_cpu->flags; | |
4744 | ||
4745 | if (ext != NULL) | |
4746 | { | |
4747 | /* CPU string contains at least one extension. */ | |
4748 | aarch64_parse_extension (ext); | |
4749 | } | |
4750 | ||
4751 | return; | |
4752 | } | |
4753 | } | |
4754 | ||
4755 | /* CPU name not found in list. */ | |
4756 | error ("unknown value %qs for -mcpu", str); | |
4757 | return; | |
4758 | } | |
4759 | ||
4760 | /* Parse the TUNE string. */ | |
4761 | ||
4762 | static void | |
4763 | aarch64_parse_tune (void) | |
4764 | { | |
4765 | const struct processor *cpu; | |
4766 | char *str = (char *) alloca (strlen (aarch64_tune_string) + 1); | |
4767 | strcpy (str, aarch64_tune_string); | |
4768 | ||
4769 | /* Loop through the list of supported CPUs to find a match. */ | |
4770 | for (cpu = all_cores; cpu->name != NULL; cpu++) | |
4771 | { | |
4772 | if (strcmp (cpu->name, str) == 0) | |
4773 | { | |
4774 | selected_tune = cpu; | |
4775 | return; | |
4776 | } | |
4777 | } | |
4778 | ||
4779 | /* CPU name not found in list. */ | |
4780 | error ("unknown value %qs for -mtune", str); | |
4781 | return; | |
4782 | } | |
4783 | ||
4784 | ||
4785 | /* Implement TARGET_OPTION_OVERRIDE. */ | |
4786 | ||
4787 | static void | |
4788 | aarch64_override_options (void) | |
4789 | { | |
4790 | /* march wins over mcpu, so when march is defined, mcpu takes the same value, | |
4791 | otherwise march remains undefined. mtune can be used with either march or | |
4792 | mcpu. */ | |
4793 | ||
4794 | if (aarch64_arch_string) | |
4795 | { | |
4796 | aarch64_parse_arch (); | |
4797 | aarch64_cpu_string = NULL; | |
4798 | } | |
4799 | ||
4800 | if (aarch64_cpu_string) | |
4801 | { | |
4802 | aarch64_parse_cpu (); | |
4803 | selected_arch = NULL; | |
4804 | } | |
4805 | ||
4806 | if (aarch64_tune_string) | |
4807 | { | |
4808 | aarch64_parse_tune (); | |
4809 | } | |
4810 | ||
4811 | initialize_aarch64_code_model (); | |
4812 | ||
4813 | aarch64_build_bitmask_table (); | |
4814 | ||
4815 | /* This target defaults to strict volatile bitfields. */ | |
4816 | if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2)) | |
4817 | flag_strict_volatile_bitfields = 1; | |
4818 | ||
4819 | /* If the user did not specify a processor, choose the default | |
4820 | one for them. This will be the CPU set during configuration using | |
4821 | --with-cpu, otherwise it is "generic". */ | |
4822 | if (!selected_cpu) | |
4823 | { | |
4824 | selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f]; | |
4825 | aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6; | |
4826 | } | |
4827 | ||
4828 | gcc_assert (selected_cpu); | |
4829 | ||
4830 | /* The selected cpu may be an architecture, so lookup tuning by core ID. */ | |
4831 | if (!selected_tune) | |
4832 | selected_tune = &all_cores[selected_cpu->core]; | |
4833 | ||
4834 | aarch64_tune_flags = selected_tune->flags; | |
4835 | aarch64_tune = selected_tune->core; | |
4836 | aarch64_tune_params = selected_tune->tune; | |
4837 | ||
4838 | aarch64_override_options_after_change (); | |
4839 | } | |
4840 | ||
4841 | /* Implement targetm.override_options_after_change. */ | |
4842 | ||
4843 | static void | |
4844 | aarch64_override_options_after_change (void) | |
4845 | { | |
4846 | faked_omit_frame_pointer = false; | |
4847 | ||
4848 | /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so | |
4849 | that aarch64_frame_pointer_required will be called. We need to remember | |
4850 | whether flag_omit_frame_pointer was turned on normally or just faked. */ | |
4851 | ||
4852 | if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer) | |
4853 | { | |
4854 | flag_omit_frame_pointer = true; | |
4855 | faked_omit_frame_pointer = true; | |
4856 | } | |
4857 | } | |
4858 | ||
4859 | static struct machine_function * | |
4860 | aarch64_init_machine_status (void) | |
4861 | { | |
4862 | struct machine_function *machine; | |
4863 | machine = ggc_alloc_cleared_machine_function (); | |
4864 | return machine; | |
4865 | } | |
4866 | ||
4867 | void | |
4868 | aarch64_init_expanders (void) | |
4869 | { | |
4870 | init_machine_status = aarch64_init_machine_status; | |
4871 | } | |
4872 | ||
4873 | /* A checking mechanism for the implementation of the various code models. */ | |
4874 | static void | |
4875 | initialize_aarch64_code_model (void) | |
4876 | { | |
4877 | if (flag_pic) | |
4878 | { | |
4879 | switch (aarch64_cmodel_var) | |
4880 | { | |
4881 | case AARCH64_CMODEL_TINY: | |
4882 | aarch64_cmodel = AARCH64_CMODEL_TINY_PIC; | |
4883 | break; | |
4884 | case AARCH64_CMODEL_SMALL: | |
4885 | aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC; | |
4886 | break; | |
4887 | case AARCH64_CMODEL_LARGE: | |
4888 | sorry ("code model %qs with -f%s", "large", | |
4889 | flag_pic > 1 ? "PIC" : "pic"); | |
4890 | default: | |
4891 | gcc_unreachable (); | |
4892 | } | |
4893 | } | |
4894 | else | |
4895 | aarch64_cmodel = aarch64_cmodel_var; | |
4896 | } | |
4897 | ||
4898 | /* Return true if SYMBOL_REF X binds locally. */ | |
4899 | ||
4900 | static bool | |
4901 | aarch64_symbol_binds_local_p (const_rtx x) | |
4902 | { | |
4903 | return (SYMBOL_REF_DECL (x) | |
4904 | ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) | |
4905 | : SYMBOL_REF_LOCAL_P (x)); | |
4906 | } | |
4907 | ||
4908 | /* Return true if SYMBOL_REF X is thread local */ | |
4909 | static bool | |
4910 | aarch64_tls_symbol_p (rtx x) | |
4911 | { | |
4912 | if (! TARGET_HAVE_TLS) | |
4913 | return false; | |
4914 | ||
4915 | if (GET_CODE (x) != SYMBOL_REF) | |
4916 | return false; | |
4917 | ||
4918 | return SYMBOL_REF_TLS_MODEL (x) != 0; | |
4919 | } | |
4920 | ||
4921 | /* Classify a TLS symbol into one of the TLS kinds. */ | |
4922 | enum aarch64_symbol_type | |
4923 | aarch64_classify_tls_symbol (rtx x) | |
4924 | { | |
4925 | enum tls_model tls_kind = tls_symbolic_operand_type (x); | |
4926 | ||
4927 | switch (tls_kind) | |
4928 | { | |
4929 | case TLS_MODEL_GLOBAL_DYNAMIC: | |
4930 | case TLS_MODEL_LOCAL_DYNAMIC: | |
4931 | return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD; | |
4932 | ||
4933 | case TLS_MODEL_INITIAL_EXEC: | |
4934 | return SYMBOL_SMALL_GOTTPREL; | |
4935 | ||
4936 | case TLS_MODEL_LOCAL_EXEC: | |
4937 | return SYMBOL_SMALL_TPREL; | |
4938 | ||
4939 | case TLS_MODEL_EMULATED: | |
4940 | case TLS_MODEL_NONE: | |
4941 | return SYMBOL_FORCE_TO_MEM; | |
4942 | ||
4943 | default: | |
4944 | gcc_unreachable (); | |
4945 | } | |
4946 | } | |
4947 | ||
4948 | /* Return the method that should be used to access SYMBOL_REF or | |
4949 | LABEL_REF X in context CONTEXT. */ | |
4950 | enum aarch64_symbol_type | |
4951 | aarch64_classify_symbol (rtx x, | |
4952 | enum aarch64_symbol_context context ATTRIBUTE_UNUSED) | |
4953 | { | |
4954 | if (GET_CODE (x) == LABEL_REF) | |
4955 | { | |
4956 | switch (aarch64_cmodel) | |
4957 | { | |
4958 | case AARCH64_CMODEL_LARGE: | |
4959 | return SYMBOL_FORCE_TO_MEM; | |
4960 | ||
4961 | case AARCH64_CMODEL_TINY_PIC: | |
4962 | case AARCH64_CMODEL_TINY: | |
4963 | case AARCH64_CMODEL_SMALL_PIC: | |
4964 | case AARCH64_CMODEL_SMALL: | |
4965 | return SYMBOL_SMALL_ABSOLUTE; | |
4966 | ||
4967 | default: | |
4968 | gcc_unreachable (); | |
4969 | } | |
4970 | } | |
4971 | ||
4972 | gcc_assert (GET_CODE (x) == SYMBOL_REF); | |
4973 | ||
4974 | switch (aarch64_cmodel) | |
4975 | { | |
4976 | case AARCH64_CMODEL_LARGE: | |
4977 | return SYMBOL_FORCE_TO_MEM; | |
4978 | ||
4979 | case AARCH64_CMODEL_TINY: | |
4980 | case AARCH64_CMODEL_SMALL: | |
4981 | ||
4982 | /* This is needed to get DFmode, TImode constants to be loaded off | |
4983 | the constant pool. Is it necessary to dump TImode values into | |
4984 | the constant pool. We don't handle TImode constant loads properly | |
4985 | yet and hence need to use the constant pool. */ | |
4986 | if (CONSTANT_POOL_ADDRESS_P (x)) | |
4987 | return SYMBOL_FORCE_TO_MEM; | |
4988 | ||
4989 | if (aarch64_tls_symbol_p (x)) | |
4990 | return aarch64_classify_tls_symbol (x); | |
4991 | ||
4992 | if (SYMBOL_REF_WEAK (x)) | |
4993 | return SYMBOL_FORCE_TO_MEM; | |
4994 | ||
4995 | return SYMBOL_SMALL_ABSOLUTE; | |
4996 | ||
4997 | case AARCH64_CMODEL_TINY_PIC: | |
4998 | case AARCH64_CMODEL_SMALL_PIC: | |
4999 | ||
5000 | if (CONSTANT_POOL_ADDRESS_P (x)) | |
5001 | return SYMBOL_FORCE_TO_MEM; | |
5002 | ||
5003 | if (aarch64_tls_symbol_p (x)) | |
5004 | return aarch64_classify_tls_symbol (x); | |
5005 | ||
5006 | if (!aarch64_symbol_binds_local_p (x)) | |
5007 | return SYMBOL_SMALL_GOT; | |
5008 | ||
5009 | return SYMBOL_SMALL_ABSOLUTE; | |
5010 | ||
5011 | default: | |
5012 | gcc_unreachable (); | |
5013 | } | |
5014 | /* By default push everything into the constant pool. */ | |
5015 | return SYMBOL_FORCE_TO_MEM; | |
5016 | } | |
5017 | ||
5018 | /* Return true if X is a symbolic constant that can be used in context | |
5019 | CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */ | |
5020 | ||
5021 | bool | |
5022 | aarch64_symbolic_constant_p (rtx x, enum aarch64_symbol_context context, | |
5023 | enum aarch64_symbol_type *symbol_type) | |
5024 | { | |
5025 | rtx offset; | |
5026 | split_const (x, &x, &offset); | |
5027 | if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) | |
5028 | *symbol_type = aarch64_classify_symbol (x, context); | |
5029 | else | |
5030 | return false; | |
5031 | ||
5032 | /* No checking of offset at this point. */ | |
5033 | return true; | |
5034 | } | |
5035 | ||
5036 | bool | |
5037 | aarch64_constant_address_p (rtx x) | |
5038 | { | |
5039 | return (CONSTANT_P (x) && memory_address_p (DImode, x)); | |
5040 | } | |
5041 | ||
5042 | bool | |
5043 | aarch64_legitimate_pic_operand_p (rtx x) | |
5044 | { | |
5045 | if (GET_CODE (x) == SYMBOL_REF | |
5046 | || (GET_CODE (x) == CONST | |
5047 | && GET_CODE (XEXP (x, 0)) == PLUS | |
5048 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)) | |
5049 | return false; | |
5050 | ||
5051 | return true; | |
5052 | } | |
5053 | ||
3520f7cc JG |
5054 | /* Return true if X holds either a quarter-precision or |
5055 | floating-point +0.0 constant. */ | |
5056 | static bool | |
5057 | aarch64_valid_floating_const (enum machine_mode mode, rtx x) | |
5058 | { | |
5059 | if (!CONST_DOUBLE_P (x)) | |
5060 | return false; | |
5061 | ||
5062 | /* TODO: We could handle moving 0.0 to a TFmode register, | |
5063 | but first we would like to refactor the movtf_aarch64 | |
5064 | to be more amicable to split moves properly and | |
5065 | correctly gate on TARGET_SIMD. For now - reject all | |
5066 | constants which are not to SFmode or DFmode registers. */ | |
5067 | if (!(mode == SFmode || mode == DFmode)) | |
5068 | return false; | |
5069 | ||
5070 | if (aarch64_float_const_zero_rtx_p (x)) | |
5071 | return true; | |
5072 | return aarch64_float_const_representable_p (x); | |
5073 | } | |
5074 | ||
43e9d192 IB |
5075 | static bool |
5076 | aarch64_legitimate_constant_p (enum machine_mode mode, rtx x) | |
5077 | { | |
5078 | /* Do not allow vector struct mode constants. We could support | |
5079 | 0 and -1 easily, but they need support in aarch64-simd.md. */ | |
5080 | if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode)) | |
5081 | return false; | |
5082 | ||
5083 | /* This could probably go away because | |
5084 | we now decompose CONST_INTs according to expand_mov_immediate. */ | |
5085 | if ((GET_CODE (x) == CONST_VECTOR | |
5086 | && aarch64_simd_valid_immediate (x, mode, false, | |
5087 | NULL, NULL, NULL, NULL, NULL) != -1) | |
3520f7cc JG |
5088 | || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x)) |
5089 | return !targetm.cannot_force_const_mem (mode, x); | |
43e9d192 IB |
5090 | |
5091 | if (GET_CODE (x) == HIGH | |
5092 | && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) | |
5093 | return true; | |
5094 | ||
5095 | return aarch64_constant_address_p (x); | |
5096 | } | |
5097 | ||
a5bc806c | 5098 | rtx |
43e9d192 IB |
5099 | aarch64_load_tp (rtx target) |
5100 | { | |
5101 | if (!target | |
5102 | || GET_MODE (target) != Pmode | |
5103 | || !register_operand (target, Pmode)) | |
5104 | target = gen_reg_rtx (Pmode); | |
5105 | ||
5106 | /* Can return in any reg. */ | |
5107 | emit_insn (gen_aarch64_load_tp_hard (target)); | |
5108 | return target; | |
5109 | } | |
5110 | ||
43e9d192 IB |
5111 | /* On AAPCS systems, this is the "struct __va_list". */ |
5112 | static GTY(()) tree va_list_type; | |
5113 | ||
5114 | /* Implement TARGET_BUILD_BUILTIN_VA_LIST. | |
5115 | Return the type to use as __builtin_va_list. | |
5116 | ||
5117 | AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as: | |
5118 | ||
5119 | struct __va_list | |
5120 | { | |
5121 | void *__stack; | |
5122 | void *__gr_top; | |
5123 | void *__vr_top; | |
5124 | int __gr_offs; | |
5125 | int __vr_offs; | |
5126 | }; */ | |
5127 | ||
5128 | static tree | |
5129 | aarch64_build_builtin_va_list (void) | |
5130 | { | |
5131 | tree va_list_name; | |
5132 | tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; | |
5133 | ||
5134 | /* Create the type. */ | |
5135 | va_list_type = lang_hooks.types.make_type (RECORD_TYPE); | |
5136 | /* Give it the required name. */ | |
5137 | va_list_name = build_decl (BUILTINS_LOCATION, | |
5138 | TYPE_DECL, | |
5139 | get_identifier ("__va_list"), | |
5140 | va_list_type); | |
5141 | DECL_ARTIFICIAL (va_list_name) = 1; | |
5142 | TYPE_NAME (va_list_type) = va_list_name; | |
665c56c6 | 5143 | TYPE_STUB_DECL (va_list_type) = va_list_name; |
43e9d192 IB |
5144 | |
5145 | /* Create the fields. */ | |
5146 | f_stack = build_decl (BUILTINS_LOCATION, | |
5147 | FIELD_DECL, get_identifier ("__stack"), | |
5148 | ptr_type_node); | |
5149 | f_grtop = build_decl (BUILTINS_LOCATION, | |
5150 | FIELD_DECL, get_identifier ("__gr_top"), | |
5151 | ptr_type_node); | |
5152 | f_vrtop = build_decl (BUILTINS_LOCATION, | |
5153 | FIELD_DECL, get_identifier ("__vr_top"), | |
5154 | ptr_type_node); | |
5155 | f_groff = build_decl (BUILTINS_LOCATION, | |
5156 | FIELD_DECL, get_identifier ("__gr_offs"), | |
5157 | integer_type_node); | |
5158 | f_vroff = build_decl (BUILTINS_LOCATION, | |
5159 | FIELD_DECL, get_identifier ("__vr_offs"), | |
5160 | integer_type_node); | |
5161 | ||
5162 | DECL_ARTIFICIAL (f_stack) = 1; | |
5163 | DECL_ARTIFICIAL (f_grtop) = 1; | |
5164 | DECL_ARTIFICIAL (f_vrtop) = 1; | |
5165 | DECL_ARTIFICIAL (f_groff) = 1; | |
5166 | DECL_ARTIFICIAL (f_vroff) = 1; | |
5167 | ||
5168 | DECL_FIELD_CONTEXT (f_stack) = va_list_type; | |
5169 | DECL_FIELD_CONTEXT (f_grtop) = va_list_type; | |
5170 | DECL_FIELD_CONTEXT (f_vrtop) = va_list_type; | |
5171 | DECL_FIELD_CONTEXT (f_groff) = va_list_type; | |
5172 | DECL_FIELD_CONTEXT (f_vroff) = va_list_type; | |
5173 | ||
5174 | TYPE_FIELDS (va_list_type) = f_stack; | |
5175 | DECL_CHAIN (f_stack) = f_grtop; | |
5176 | DECL_CHAIN (f_grtop) = f_vrtop; | |
5177 | DECL_CHAIN (f_vrtop) = f_groff; | |
5178 | DECL_CHAIN (f_groff) = f_vroff; | |
5179 | ||
5180 | /* Compute its layout. */ | |
5181 | layout_type (va_list_type); | |
5182 | ||
5183 | return va_list_type; | |
5184 | } | |
5185 | ||
5186 | /* Implement TARGET_EXPAND_BUILTIN_VA_START. */ | |
5187 | static void | |
5188 | aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED) | |
5189 | { | |
5190 | const CUMULATIVE_ARGS *cum; | |
5191 | tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; | |
5192 | tree stack, grtop, vrtop, groff, vroff; | |
5193 | tree t; | |
5194 | int gr_save_area_size; | |
5195 | int vr_save_area_size; | |
5196 | int vr_offset; | |
5197 | ||
5198 | cum = &crtl->args.info; | |
5199 | gr_save_area_size | |
5200 | = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD; | |
5201 | vr_save_area_size | |
5202 | = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG; | |
5203 | ||
5204 | if (TARGET_GENERAL_REGS_ONLY) | |
5205 | { | |
5206 | if (cum->aapcs_nvrn > 0) | |
5207 | sorry ("%qs and floating point or vector arguments", | |
5208 | "-mgeneral-regs-only"); | |
5209 | vr_save_area_size = 0; | |
5210 | } | |
5211 | ||
5212 | f_stack = TYPE_FIELDS (va_list_type_node); | |
5213 | f_grtop = DECL_CHAIN (f_stack); | |
5214 | f_vrtop = DECL_CHAIN (f_grtop); | |
5215 | f_groff = DECL_CHAIN (f_vrtop); | |
5216 | f_vroff = DECL_CHAIN (f_groff); | |
5217 | ||
5218 | stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack, | |
5219 | NULL_TREE); | |
5220 | grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop, | |
5221 | NULL_TREE); | |
5222 | vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop, | |
5223 | NULL_TREE); | |
5224 | groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff, | |
5225 | NULL_TREE); | |
5226 | vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff, | |
5227 | NULL_TREE); | |
5228 | ||
5229 | /* Emit code to initialize STACK, which points to the next varargs stack | |
5230 | argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used | |
5231 | by named arguments. STACK is 8-byte aligned. */ | |
5232 | t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx); | |
5233 | if (cum->aapcs_stack_size > 0) | |
5234 | t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD); | |
5235 | t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t); | |
5236 | expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); | |
5237 | ||
5238 | /* Emit code to initialize GRTOP, the top of the GR save area. | |
5239 | virtual_incoming_args_rtx should have been 16 byte aligned. */ | |
5240 | t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx); | |
5241 | t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t); | |
5242 | expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); | |
5243 | ||
5244 | /* Emit code to initialize VRTOP, the top of the VR save area. | |
5245 | This address is gr_save_area_bytes below GRTOP, rounded | |
5246 | down to the next 16-byte boundary. */ | |
5247 | t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx); | |
5248 | vr_offset = AARCH64_ROUND_UP (gr_save_area_size, | |
5249 | STACK_BOUNDARY / BITS_PER_UNIT); | |
5250 | ||
5251 | if (vr_offset) | |
5252 | t = fold_build_pointer_plus_hwi (t, -vr_offset); | |
5253 | t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t); | |
5254 | expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); | |
5255 | ||
5256 | /* Emit code to initialize GROFF, the offset from GRTOP of the | |
5257 | next GPR argument. */ | |
5258 | t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff, | |
5259 | build_int_cst (TREE_TYPE (groff), -gr_save_area_size)); | |
5260 | expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); | |
5261 | ||
5262 | /* Likewise emit code to initialize VROFF, the offset from FTOP | |
5263 | of the next VR argument. */ | |
5264 | t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff, | |
5265 | build_int_cst (TREE_TYPE (vroff), -vr_save_area_size)); | |
5266 | expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); | |
5267 | } | |
5268 | ||
5269 | /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */ | |
5270 | ||
5271 | static tree | |
5272 | aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, | |
5273 | gimple_seq *post_p ATTRIBUTE_UNUSED) | |
5274 | { | |
5275 | tree addr; | |
5276 | bool indirect_p; | |
5277 | bool is_ha; /* is HFA or HVA. */ | |
5278 | bool dw_align; /* double-word align. */ | |
5279 | enum machine_mode ag_mode = VOIDmode; | |
5280 | int nregs; | |
5281 | enum machine_mode mode; | |
5282 | ||
5283 | tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; | |
5284 | tree stack, f_top, f_off, off, arg, roundup, on_stack; | |
5285 | HOST_WIDE_INT size, rsize, adjust, align; | |
5286 | tree t, u, cond1, cond2; | |
5287 | ||
5288 | indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false); | |
5289 | if (indirect_p) | |
5290 | type = build_pointer_type (type); | |
5291 | ||
5292 | mode = TYPE_MODE (type); | |
5293 | ||
5294 | f_stack = TYPE_FIELDS (va_list_type_node); | |
5295 | f_grtop = DECL_CHAIN (f_stack); | |
5296 | f_vrtop = DECL_CHAIN (f_grtop); | |
5297 | f_groff = DECL_CHAIN (f_vrtop); | |
5298 | f_vroff = DECL_CHAIN (f_groff); | |
5299 | ||
5300 | stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist), | |
5301 | f_stack, NULL_TREE); | |
5302 | size = int_size_in_bytes (type); | |
5303 | align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT; | |
5304 | ||
5305 | dw_align = false; | |
5306 | adjust = 0; | |
5307 | if (aarch64_vfp_is_call_or_return_candidate (mode, | |
5308 | type, | |
5309 | &ag_mode, | |
5310 | &nregs, | |
5311 | &is_ha)) | |
5312 | { | |
5313 | /* TYPE passed in fp/simd registers. */ | |
5314 | if (TARGET_GENERAL_REGS_ONLY) | |
5315 | sorry ("%qs and floating point or vector arguments", | |
5316 | "-mgeneral-regs-only"); | |
5317 | ||
5318 | f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), | |
5319 | unshare_expr (valist), f_vrtop, NULL_TREE); | |
5320 | f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), | |
5321 | unshare_expr (valist), f_vroff, NULL_TREE); | |
5322 | ||
5323 | rsize = nregs * UNITS_PER_VREG; | |
5324 | ||
5325 | if (is_ha) | |
5326 | { | |
5327 | if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG) | |
5328 | adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode); | |
5329 | } | |
5330 | else if (BLOCK_REG_PADDING (mode, type, 1) == downward | |
5331 | && size < UNITS_PER_VREG) | |
5332 | { | |
5333 | adjust = UNITS_PER_VREG - size; | |
5334 | } | |
5335 | } | |
5336 | else | |
5337 | { | |
5338 | /* TYPE passed in general registers. */ | |
5339 | f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), | |
5340 | unshare_expr (valist), f_grtop, NULL_TREE); | |
5341 | f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff), | |
5342 | unshare_expr (valist), f_groff, NULL_TREE); | |
5343 | rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD; | |
5344 | nregs = rsize / UNITS_PER_WORD; | |
5345 | ||
5346 | if (align > 8) | |
5347 | dw_align = true; | |
5348 | ||
5349 | if (BLOCK_REG_PADDING (mode, type, 1) == downward | |
5350 | && size < UNITS_PER_WORD) | |
5351 | { | |
5352 | adjust = UNITS_PER_WORD - size; | |
5353 | } | |
5354 | } | |
5355 | ||
5356 | /* Get a local temporary for the field value. */ | |
5357 | off = get_initialized_tmp_var (f_off, pre_p, NULL); | |
5358 | ||
5359 | /* Emit code to branch if off >= 0. */ | |
5360 | t = build2 (GE_EXPR, boolean_type_node, off, | |
5361 | build_int_cst (TREE_TYPE (off), 0)); | |
5362 | cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE); | |
5363 | ||
5364 | if (dw_align) | |
5365 | { | |
5366 | /* Emit: offs = (offs + 15) & -16. */ | |
5367 | t = build2 (PLUS_EXPR, TREE_TYPE (off), off, | |
5368 | build_int_cst (TREE_TYPE (off), 15)); | |
5369 | t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t, | |
5370 | build_int_cst (TREE_TYPE (off), -16)); | |
5371 | roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t); | |
5372 | } | |
5373 | else | |
5374 | roundup = NULL; | |
5375 | ||
5376 | /* Update ap.__[g|v]r_offs */ | |
5377 | t = build2 (PLUS_EXPR, TREE_TYPE (off), off, | |
5378 | build_int_cst (TREE_TYPE (off), rsize)); | |
5379 | t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t); | |
5380 | ||
5381 | /* String up. */ | |
5382 | if (roundup) | |
5383 | t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t); | |
5384 | ||
5385 | /* [cond2] if (ap.__[g|v]r_offs > 0) */ | |
5386 | u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off), | |
5387 | build_int_cst (TREE_TYPE (f_off), 0)); | |
5388 | cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE); | |
5389 | ||
5390 | /* String up: make sure the assignment happens before the use. */ | |
5391 | t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2); | |
5392 | COND_EXPR_ELSE (cond1) = t; | |
5393 | ||
5394 | /* Prepare the trees handling the argument that is passed on the stack; | |
5395 | the top level node will store in ON_STACK. */ | |
5396 | arg = get_initialized_tmp_var (stack, pre_p, NULL); | |
5397 | if (align > 8) | |
5398 | { | |
5399 | /* if (alignof(type) > 8) (arg = arg + 15) & -16; */ | |
5400 | t = fold_convert (intDI_type_node, arg); | |
5401 | t = build2 (PLUS_EXPR, TREE_TYPE (t), t, | |
5402 | build_int_cst (TREE_TYPE (t), 15)); | |
5403 | t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, | |
5404 | build_int_cst (TREE_TYPE (t), -16)); | |
5405 | t = fold_convert (TREE_TYPE (arg), t); | |
5406 | roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t); | |
5407 | } | |
5408 | else | |
5409 | roundup = NULL; | |
5410 | /* Advance ap.__stack */ | |
5411 | t = fold_convert (intDI_type_node, arg); | |
5412 | t = build2 (PLUS_EXPR, TREE_TYPE (t), t, | |
5413 | build_int_cst (TREE_TYPE (t), size + 7)); | |
5414 | t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, | |
5415 | build_int_cst (TREE_TYPE (t), -8)); | |
5416 | t = fold_convert (TREE_TYPE (arg), t); | |
5417 | t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t); | |
5418 | /* String up roundup and advance. */ | |
5419 | if (roundup) | |
5420 | t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t); | |
5421 | /* String up with arg */ | |
5422 | on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg); | |
5423 | /* Big-endianness related address adjustment. */ | |
5424 | if (BLOCK_REG_PADDING (mode, type, 1) == downward | |
5425 | && size < UNITS_PER_WORD) | |
5426 | { | |
5427 | t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg, | |
5428 | size_int (UNITS_PER_WORD - size)); | |
5429 | on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t); | |
5430 | } | |
5431 | ||
5432 | COND_EXPR_THEN (cond1) = unshare_expr (on_stack); | |
5433 | COND_EXPR_THEN (cond2) = unshare_expr (on_stack); | |
5434 | ||
5435 | /* Adjustment to OFFSET in the case of BIG_ENDIAN. */ | |
5436 | t = off; | |
5437 | if (adjust) | |
5438 | t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off, | |
5439 | build_int_cst (TREE_TYPE (off), adjust)); | |
5440 | ||
5441 | t = fold_convert (sizetype, t); | |
5442 | t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t); | |
5443 | ||
5444 | if (is_ha) | |
5445 | { | |
5446 | /* type ha; // treat as "struct {ftype field[n];}" | |
5447 | ... [computing offs] | |
5448 | for (i = 0; i <nregs; ++i, offs += 16) | |
5449 | ha.field[i] = *((ftype *)(ap.__vr_top + offs)); | |
5450 | return ha; */ | |
5451 | int i; | |
5452 | tree tmp_ha, field_t, field_ptr_t; | |
5453 | ||
5454 | /* Declare a local variable. */ | |
5455 | tmp_ha = create_tmp_var_raw (type, "ha"); | |
5456 | gimple_add_tmp_var (tmp_ha); | |
5457 | ||
5458 | /* Establish the base type. */ | |
5459 | switch (ag_mode) | |
5460 | { | |
5461 | case SFmode: | |
5462 | field_t = float_type_node; | |
5463 | field_ptr_t = float_ptr_type_node; | |
5464 | break; | |
5465 | case DFmode: | |
5466 | field_t = double_type_node; | |
5467 | field_ptr_t = double_ptr_type_node; | |
5468 | break; | |
5469 | case TFmode: | |
5470 | field_t = long_double_type_node; | |
5471 | field_ptr_t = long_double_ptr_type_node; | |
5472 | break; | |
5473 | /* The half precision and quad precision are not fully supported yet. Enable | |
5474 | the following code after the support is complete. Need to find the correct | |
5475 | type node for __fp16 *. */ | |
5476 | #if 0 | |
5477 | case HFmode: | |
5478 | field_t = float_type_node; | |
5479 | field_ptr_t = float_ptr_type_node; | |
5480 | break; | |
5481 | #endif | |
5482 | case V2SImode: | |
5483 | case V4SImode: | |
5484 | { | |
5485 | tree innertype = make_signed_type (GET_MODE_PRECISION (SImode)); | |
5486 | field_t = build_vector_type_for_mode (innertype, ag_mode); | |
5487 | field_ptr_t = build_pointer_type (field_t); | |
5488 | } | |
5489 | break; | |
5490 | default: | |
5491 | gcc_assert (0); | |
5492 | } | |
5493 | ||
5494 | /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area */ | |
5495 | tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha); | |
5496 | addr = t; | |
5497 | t = fold_convert (field_ptr_t, addr); | |
5498 | t = build2 (MODIFY_EXPR, field_t, | |
5499 | build1 (INDIRECT_REF, field_t, tmp_ha), | |
5500 | build1 (INDIRECT_REF, field_t, t)); | |
5501 | ||
5502 | /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */ | |
5503 | for (i = 1; i < nregs; ++i) | |
5504 | { | |
5505 | addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG); | |
5506 | u = fold_convert (field_ptr_t, addr); | |
5507 | u = build2 (MODIFY_EXPR, field_t, | |
5508 | build2 (MEM_REF, field_t, tmp_ha, | |
5509 | build_int_cst (field_ptr_t, | |
5510 | (i * | |
5511 | int_size_in_bytes (field_t)))), | |
5512 | build1 (INDIRECT_REF, field_t, u)); | |
5513 | t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u); | |
5514 | } | |
5515 | ||
5516 | u = fold_convert (TREE_TYPE (f_top), tmp_ha); | |
5517 | t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u); | |
5518 | } | |
5519 | ||
5520 | COND_EXPR_ELSE (cond2) = t; | |
5521 | addr = fold_convert (build_pointer_type (type), cond1); | |
5522 | addr = build_va_arg_indirect_ref (addr); | |
5523 | ||
5524 | if (indirect_p) | |
5525 | addr = build_va_arg_indirect_ref (addr); | |
5526 | ||
5527 | return addr; | |
5528 | } | |
5529 | ||
5530 | /* Implement TARGET_SETUP_INCOMING_VARARGS. */ | |
5531 | ||
5532 | static void | |
5533 | aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode, | |
5534 | tree type, int *pretend_size ATTRIBUTE_UNUSED, | |
5535 | int no_rtl) | |
5536 | { | |
5537 | CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); | |
5538 | CUMULATIVE_ARGS local_cum; | |
5539 | int gr_saved, vr_saved; | |
5540 | ||
5541 | /* The caller has advanced CUM up to, but not beyond, the last named | |
5542 | argument. Advance a local copy of CUM past the last "real" named | |
5543 | argument, to find out how many registers are left over. */ | |
5544 | local_cum = *cum; | |
5545 | aarch64_function_arg_advance (pack_cumulative_args(&local_cum), mode, type, true); | |
5546 | ||
5547 | /* Found out how many registers we need to save. */ | |
5548 | gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn; | |
5549 | vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn; | |
5550 | ||
5551 | if (TARGET_GENERAL_REGS_ONLY) | |
5552 | { | |
5553 | if (local_cum.aapcs_nvrn > 0) | |
5554 | sorry ("%qs and floating point or vector arguments", | |
5555 | "-mgeneral-regs-only"); | |
5556 | vr_saved = 0; | |
5557 | } | |
5558 | ||
5559 | if (!no_rtl) | |
5560 | { | |
5561 | if (gr_saved > 0) | |
5562 | { | |
5563 | rtx ptr, mem; | |
5564 | ||
5565 | /* virtual_incoming_args_rtx should have been 16-byte aligned. */ | |
5566 | ptr = plus_constant (Pmode, virtual_incoming_args_rtx, | |
5567 | - gr_saved * UNITS_PER_WORD); | |
5568 | mem = gen_frame_mem (BLKmode, ptr); | |
5569 | set_mem_alias_set (mem, get_varargs_alias_set ()); | |
5570 | ||
5571 | move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM, | |
5572 | mem, gr_saved); | |
5573 | } | |
5574 | if (vr_saved > 0) | |
5575 | { | |
5576 | /* We can't use move_block_from_reg, because it will use | |
5577 | the wrong mode, storing D regs only. */ | |
5578 | enum machine_mode mode = TImode; | |
5579 | int off, i; | |
5580 | ||
5581 | /* Set OFF to the offset from virtual_incoming_args_rtx of | |
5582 | the first vector register. The VR save area lies below | |
5583 | the GR one, and is aligned to 16 bytes. */ | |
5584 | off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD, | |
5585 | STACK_BOUNDARY / BITS_PER_UNIT); | |
5586 | off -= vr_saved * UNITS_PER_VREG; | |
5587 | ||
5588 | for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i) | |
5589 | { | |
5590 | rtx ptr, mem; | |
5591 | ||
5592 | ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off); | |
5593 | mem = gen_frame_mem (mode, ptr); | |
5594 | set_mem_alias_set (mem, get_varargs_alias_set ()); | |
5595 | aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i)); | |
5596 | off += UNITS_PER_VREG; | |
5597 | } | |
5598 | } | |
5599 | } | |
5600 | ||
5601 | /* We don't save the size into *PRETEND_SIZE because we want to avoid | |
5602 | any complication of having crtl->args.pretend_args_size changed. */ | |
5603 | cfun->machine->saved_varargs_size | |
5604 | = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD, | |
5605 | STACK_BOUNDARY / BITS_PER_UNIT) | |
5606 | + vr_saved * UNITS_PER_VREG); | |
5607 | } | |
5608 | ||
5609 | static void | |
5610 | aarch64_conditional_register_usage (void) | |
5611 | { | |
5612 | int i; | |
5613 | if (!TARGET_FLOAT) | |
5614 | { | |
5615 | for (i = V0_REGNUM; i <= V31_REGNUM; i++) | |
5616 | { | |
5617 | fixed_regs[i] = 1; | |
5618 | call_used_regs[i] = 1; | |
5619 | } | |
5620 | } | |
5621 | } | |
5622 | ||
5623 | /* Walk down the type tree of TYPE counting consecutive base elements. | |
5624 | If *MODEP is VOIDmode, then set it to the first valid floating point | |
5625 | type. If a non-floating point type is found, or if a floating point | |
5626 | type that doesn't match a non-VOIDmode *MODEP is found, then return -1, | |
5627 | otherwise return the count in the sub-tree. */ | |
5628 | static int | |
5629 | aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) | |
5630 | { | |
5631 | enum machine_mode mode; | |
5632 | HOST_WIDE_INT size; | |
5633 | ||
5634 | switch (TREE_CODE (type)) | |
5635 | { | |
5636 | case REAL_TYPE: | |
5637 | mode = TYPE_MODE (type); | |
5638 | if (mode != DFmode && mode != SFmode && mode != TFmode) | |
5639 | return -1; | |
5640 | ||
5641 | if (*modep == VOIDmode) | |
5642 | *modep = mode; | |
5643 | ||
5644 | if (*modep == mode) | |
5645 | return 1; | |
5646 | ||
5647 | break; | |
5648 | ||
5649 | case COMPLEX_TYPE: | |
5650 | mode = TYPE_MODE (TREE_TYPE (type)); | |
5651 | if (mode != DFmode && mode != SFmode && mode != TFmode) | |
5652 | return -1; | |
5653 | ||
5654 | if (*modep == VOIDmode) | |
5655 | *modep = mode; | |
5656 | ||
5657 | if (*modep == mode) | |
5658 | return 2; | |
5659 | ||
5660 | break; | |
5661 | ||
5662 | case VECTOR_TYPE: | |
5663 | /* Use V2SImode and V4SImode as representatives of all 64-bit | |
5664 | and 128-bit vector types. */ | |
5665 | size = int_size_in_bytes (type); | |
5666 | switch (size) | |
5667 | { | |
5668 | case 8: | |
5669 | mode = V2SImode; | |
5670 | break; | |
5671 | case 16: | |
5672 | mode = V4SImode; | |
5673 | break; | |
5674 | default: | |
5675 | return -1; | |
5676 | } | |
5677 | ||
5678 | if (*modep == VOIDmode) | |
5679 | *modep = mode; | |
5680 | ||
5681 | /* Vector modes are considered to be opaque: two vectors are | |
5682 | equivalent for the purposes of being homogeneous aggregates | |
5683 | if they are the same size. */ | |
5684 | if (*modep == mode) | |
5685 | return 1; | |
5686 | ||
5687 | break; | |
5688 | ||
5689 | case ARRAY_TYPE: | |
5690 | { | |
5691 | int count; | |
5692 | tree index = TYPE_DOMAIN (type); | |
5693 | ||
5694 | /* Can't handle incomplete types. */ | |
5695 | if (!COMPLETE_TYPE_P (type)) | |
5696 | return -1; | |
5697 | ||
5698 | count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep); | |
5699 | if (count == -1 | |
5700 | || !index | |
5701 | || !TYPE_MAX_VALUE (index) | |
5702 | || !host_integerp (TYPE_MAX_VALUE (index), 1) | |
5703 | || !TYPE_MIN_VALUE (index) | |
5704 | || !host_integerp (TYPE_MIN_VALUE (index), 1) | |
5705 | || count < 0) | |
5706 | return -1; | |
5707 | ||
5708 | count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) | |
5709 | - tree_low_cst (TYPE_MIN_VALUE (index), 1)); | |
5710 | ||
5711 | /* There must be no padding. */ | |
5712 | if (!host_integerp (TYPE_SIZE (type), 1) | |
5713 | || (tree_low_cst (TYPE_SIZE (type), 1) | |
5714 | != count * GET_MODE_BITSIZE (*modep))) | |
5715 | return -1; | |
5716 | ||
5717 | return count; | |
5718 | } | |
5719 | ||
5720 | case RECORD_TYPE: | |
5721 | { | |
5722 | int count = 0; | |
5723 | int sub_count; | |
5724 | tree field; | |
5725 | ||
5726 | /* Can't handle incomplete types. */ | |
5727 | if (!COMPLETE_TYPE_P (type)) | |
5728 | return -1; | |
5729 | ||
5730 | for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) | |
5731 | { | |
5732 | if (TREE_CODE (field) != FIELD_DECL) | |
5733 | continue; | |
5734 | ||
5735 | sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); | |
5736 | if (sub_count < 0) | |
5737 | return -1; | |
5738 | count += sub_count; | |
5739 | } | |
5740 | ||
5741 | /* There must be no padding. */ | |
5742 | if (!host_integerp (TYPE_SIZE (type), 1) | |
5743 | || (tree_low_cst (TYPE_SIZE (type), 1) | |
5744 | != count * GET_MODE_BITSIZE (*modep))) | |
5745 | return -1; | |
5746 | ||
5747 | return count; | |
5748 | } | |
5749 | ||
5750 | case UNION_TYPE: | |
5751 | case QUAL_UNION_TYPE: | |
5752 | { | |
5753 | /* These aren't very interesting except in a degenerate case. */ | |
5754 | int count = 0; | |
5755 | int sub_count; | |
5756 | tree field; | |
5757 | ||
5758 | /* Can't handle incomplete types. */ | |
5759 | if (!COMPLETE_TYPE_P (type)) | |
5760 | return -1; | |
5761 | ||
5762 | for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) | |
5763 | { | |
5764 | if (TREE_CODE (field) != FIELD_DECL) | |
5765 | continue; | |
5766 | ||
5767 | sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); | |
5768 | if (sub_count < 0) | |
5769 | return -1; | |
5770 | count = count > sub_count ? count : sub_count; | |
5771 | } | |
5772 | ||
5773 | /* There must be no padding. */ | |
5774 | if (!host_integerp (TYPE_SIZE (type), 1) | |
5775 | || (tree_low_cst (TYPE_SIZE (type), 1) | |
5776 | != count * GET_MODE_BITSIZE (*modep))) | |
5777 | return -1; | |
5778 | ||
5779 | return count; | |
5780 | } | |
5781 | ||
5782 | default: | |
5783 | break; | |
5784 | } | |
5785 | ||
5786 | return -1; | |
5787 | } | |
5788 | ||
5789 | /* Return TRUE if the type, as described by TYPE and MODE, is a composite | |
5790 | type as described in AAPCS64 \S 4.3. This includes aggregate, union and | |
5791 | array types. The C99 floating-point complex types are also considered | |
5792 | as composite types, according to AAPCS64 \S 7.1.1. The complex integer | |
5793 | types, which are GCC extensions and out of the scope of AAPCS64, are | |
5794 | treated as composite types here as well. | |
5795 | ||
5796 | Note that MODE itself is not sufficient in determining whether a type | |
5797 | is such a composite type or not. This is because | |
5798 | stor-layout.c:compute_record_mode may have already changed the MODE | |
5799 | (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a | |
5800 | structure with only one field may have its MODE set to the mode of the | |
5801 | field. Also an integer mode whose size matches the size of the | |
5802 | RECORD_TYPE type may be used to substitute the original mode | |
5803 | (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be | |
5804 | solely relied on. */ | |
5805 | ||
5806 | static bool | |
5807 | aarch64_composite_type_p (const_tree type, | |
5808 | enum machine_mode mode) | |
5809 | { | |
5810 | if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)) | |
5811 | return true; | |
5812 | ||
5813 | if (mode == BLKmode | |
5814 | || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT | |
5815 | || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT) | |
5816 | return true; | |
5817 | ||
5818 | return false; | |
5819 | } | |
5820 | ||
5821 | /* Return TRUE if the type, as described by TYPE and MODE, is a short vector | |
5822 | type as described in AAPCS64 \S 4.1.2. | |
5823 | ||
5824 | See the comment above aarch64_composite_type_p for the notes on MODE. */ | |
5825 | ||
5826 | static bool | |
5827 | aarch64_short_vector_p (const_tree type, | |
5828 | enum machine_mode mode) | |
5829 | { | |
5830 | HOST_WIDE_INT size = -1; | |
5831 | ||
5832 | if (type && TREE_CODE (type) == VECTOR_TYPE) | |
5833 | size = int_size_in_bytes (type); | |
5834 | else if (!aarch64_composite_type_p (type, mode) | |
5835 | && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT | |
5836 | || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)) | |
5837 | size = GET_MODE_SIZE (mode); | |
5838 | ||
5839 | return (size == 8 || size == 16) ? true : false; | |
5840 | } | |
5841 | ||
5842 | /* Return TRUE if an argument, whose type is described by TYPE and MODE, | |
5843 | shall be passed or returned in simd/fp register(s) (providing these | |
5844 | parameter passing registers are available). | |
5845 | ||
5846 | Upon successful return, *COUNT returns the number of needed registers, | |
5847 | *BASE_MODE returns the mode of the individual register and when IS_HAF | |
5848 | is not NULL, *IS_HA indicates whether or not the argument is a homogeneous | |
5849 | floating-point aggregate or a homogeneous short-vector aggregate. */ | |
5850 | ||
5851 | static bool | |
5852 | aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode, | |
5853 | const_tree type, | |
5854 | enum machine_mode *base_mode, | |
5855 | int *count, | |
5856 | bool *is_ha) | |
5857 | { | |
5858 | enum machine_mode new_mode = VOIDmode; | |
5859 | bool composite_p = aarch64_composite_type_p (type, mode); | |
5860 | ||
5861 | if (is_ha != NULL) *is_ha = false; | |
5862 | ||
5863 | if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT) | |
5864 | || aarch64_short_vector_p (type, mode)) | |
5865 | { | |
5866 | *count = 1; | |
5867 | new_mode = mode; | |
5868 | } | |
5869 | else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | |
5870 | { | |
5871 | if (is_ha != NULL) *is_ha = true; | |
5872 | *count = 2; | |
5873 | new_mode = GET_MODE_INNER (mode); | |
5874 | } | |
5875 | else if (type && composite_p) | |
5876 | { | |
5877 | int ag_count = aapcs_vfp_sub_candidate (type, &new_mode); | |
5878 | ||
5879 | if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS) | |
5880 | { | |
5881 | if (is_ha != NULL) *is_ha = true; | |
5882 | *count = ag_count; | |
5883 | } | |
5884 | else | |
5885 | return false; | |
5886 | } | |
5887 | else | |
5888 | return false; | |
5889 | ||
5890 | *base_mode = new_mode; | |
5891 | return true; | |
5892 | } | |
5893 | ||
5894 | /* Implement TARGET_STRUCT_VALUE_RTX. */ | |
5895 | ||
5896 | static rtx | |
5897 | aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, | |
5898 | int incoming ATTRIBUTE_UNUSED) | |
5899 | { | |
5900 | return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM); | |
5901 | } | |
5902 | ||
5903 | /* Implements target hook vector_mode_supported_p. */ | |
5904 | static bool | |
5905 | aarch64_vector_mode_supported_p (enum machine_mode mode) | |
5906 | { | |
5907 | if (TARGET_SIMD | |
5908 | && (mode == V4SImode || mode == V8HImode | |
5909 | || mode == V16QImode || mode == V2DImode | |
5910 | || mode == V2SImode || mode == V4HImode | |
5911 | || mode == V8QImode || mode == V2SFmode | |
5912 | || mode == V4SFmode || mode == V2DFmode)) | |
5913 | return true; | |
5914 | ||
5915 | return false; | |
5916 | } | |
5917 | ||
5918 | /* Return quad mode as the preferred SIMD mode. */ | |
5919 | static enum machine_mode | |
5920 | aarch64_preferred_simd_mode (enum machine_mode mode) | |
5921 | { | |
5922 | if (TARGET_SIMD) | |
5923 | switch (mode) | |
5924 | { | |
5925 | case DFmode: | |
5926 | return V2DFmode; | |
5927 | case SFmode: | |
5928 | return V4SFmode; | |
5929 | case SImode: | |
5930 | return V4SImode; | |
5931 | case HImode: | |
5932 | return V8HImode; | |
5933 | case QImode: | |
5934 | return V16QImode; | |
5935 | case DImode: | |
5936 | return V2DImode; | |
5937 | break; | |
5938 | ||
5939 | default:; | |
5940 | } | |
5941 | return word_mode; | |
5942 | } | |
5943 | ||
3b357264 JG |
5944 | /* Return the bitmask of possible vector sizes for the vectorizer |
5945 | to iterate over. */ | |
5946 | static unsigned int | |
5947 | aarch64_autovectorize_vector_sizes (void) | |
5948 | { | |
5949 | return (16 | 8); | |
5950 | } | |
5951 | ||
c6fc9e43 YZ |
5952 | /* A table to help perform AArch64-specific name mangling for AdvSIMD |
5953 | vector types in order to conform to the AAPCS64 (see "Procedure | |
5954 | Call Standard for the ARM 64-bit Architecture", Appendix A). To | |
5955 | qualify for emission with the mangled names defined in that document, | |
5956 | a vector type must not only be of the correct mode but also be | |
5957 | composed of AdvSIMD vector element types (e.g. | |
5958 | _builtin_aarch64_simd_qi); these types are registered by | |
5959 | aarch64_init_simd_builtins (). In other words, vector types defined | |
5960 | in other ways e.g. via vector_size attribute will get default | |
5961 | mangled names. */ | |
5962 | typedef struct | |
5963 | { | |
5964 | enum machine_mode mode; | |
5965 | const char *element_type_name; | |
5966 | const char *mangled_name; | |
5967 | } aarch64_simd_mangle_map_entry; | |
5968 | ||
5969 | static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = { | |
5970 | /* 64-bit containerized types. */ | |
5971 | { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" }, | |
5972 | { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" }, | |
5973 | { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" }, | |
5974 | { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" }, | |
5975 | { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" }, | |
5976 | { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" }, | |
5977 | { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" }, | |
5978 | { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" }, | |
5979 | { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" }, | |
5980 | /* 128-bit containerized types. */ | |
5981 | { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" }, | |
5982 | { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" }, | |
5983 | { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" }, | |
5984 | { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" }, | |
5985 | { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" }, | |
5986 | { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" }, | |
5987 | { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" }, | |
5988 | { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" }, | |
5989 | { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" }, | |
5990 | { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" }, | |
5991 | { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" }, | |
5992 | { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" }, | |
5993 | { VOIDmode, NULL, NULL } | |
5994 | }; | |
5995 | ||
ac2b960f YZ |
5996 | /* Implement TARGET_MANGLE_TYPE. */ |
5997 | ||
6f549691 | 5998 | static const char * |
ac2b960f YZ |
5999 | aarch64_mangle_type (const_tree type) |
6000 | { | |
6001 | /* The AArch64 ABI documents say that "__va_list" has to be | |
6002 | managled as if it is in the "std" namespace. */ | |
6003 | if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type)) | |
6004 | return "St9__va_list"; | |
6005 | ||
c6fc9e43 YZ |
6006 | /* Check the mode of the vector type, and the name of the vector |
6007 | element type, against the table. */ | |
6008 | if (TREE_CODE (type) == VECTOR_TYPE) | |
6009 | { | |
6010 | aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map; | |
6011 | ||
6012 | while (pos->mode != VOIDmode) | |
6013 | { | |
6014 | tree elt_type = TREE_TYPE (type); | |
6015 | ||
6016 | if (pos->mode == TYPE_MODE (type) | |
6017 | && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL | |
6018 | && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))), | |
6019 | pos->element_type_name)) | |
6020 | return pos->mangled_name; | |
6021 | ||
6022 | pos++; | |
6023 | } | |
6024 | } | |
6025 | ||
ac2b960f YZ |
6026 | /* Use the default mangling. */ |
6027 | return NULL; | |
6028 | } | |
6029 | ||
43e9d192 IB |
6030 | /* Return the equivalent letter for size. */ |
6031 | static unsigned char | |
6032 | sizetochar (int size) | |
6033 | { | |
6034 | switch (size) | |
6035 | { | |
6036 | case 64: return 'd'; | |
6037 | case 32: return 's'; | |
6038 | case 16: return 'h'; | |
6039 | case 8 : return 'b'; | |
6040 | default: gcc_unreachable (); | |
6041 | } | |
6042 | } | |
6043 | ||
3520f7cc JG |
6044 | /* Return true iff x is a uniform vector of floating-point |
6045 | constants, and the constant can be represented in | |
6046 | quarter-precision form. Note, as aarch64_float_const_representable | |
6047 | rejects both +0.0 and -0.0, we will also reject +0.0 and -0.0. */ | |
6048 | static bool | |
6049 | aarch64_vect_float_const_representable_p (rtx x) | |
6050 | { | |
6051 | int i = 0; | |
6052 | REAL_VALUE_TYPE r0, ri; | |
6053 | rtx x0, xi; | |
6054 | ||
6055 | if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT) | |
6056 | return false; | |
6057 | ||
6058 | x0 = CONST_VECTOR_ELT (x, 0); | |
6059 | if (!CONST_DOUBLE_P (x0)) | |
6060 | return false; | |
6061 | ||
6062 | REAL_VALUE_FROM_CONST_DOUBLE (r0, x0); | |
6063 | ||
6064 | for (i = 1; i < CONST_VECTOR_NUNITS (x); i++) | |
6065 | { | |
6066 | xi = CONST_VECTOR_ELT (x, i); | |
6067 | if (!CONST_DOUBLE_P (xi)) | |
6068 | return false; | |
6069 | ||
6070 | REAL_VALUE_FROM_CONST_DOUBLE (ri, xi); | |
6071 | if (!REAL_VALUES_EQUAL (r0, ri)) | |
6072 | return false; | |
6073 | } | |
6074 | ||
6075 | return aarch64_float_const_representable_p (x0); | |
6076 | } | |
6077 | ||
6078 | /* TODO: This function returns values similar to those | |
6079 | returned by neon_valid_immediate in gcc/config/arm/arm.c | |
6080 | but the API here is different enough that these magic numbers | |
6081 | are not used. It should be sufficient to return true or false. */ | |
43e9d192 IB |
6082 | static int |
6083 | aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse, | |
6084 | rtx *modconst, int *elementwidth, | |
6085 | unsigned char *elementchar, | |
6086 | int *mvn, int *shift) | |
6087 | { | |
6088 | #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \ | |
6089 | matches = 1; \ | |
6090 | for (i = 0; i < idx; i += (STRIDE)) \ | |
6091 | if (!(TEST)) \ | |
6092 | matches = 0; \ | |
6093 | if (matches) \ | |
6094 | { \ | |
6095 | immtype = (CLASS); \ | |
6096 | elsize = (ELSIZE); \ | |
6097 | elchar = sizetochar (elsize); \ | |
6098 | eshift = (SHIFT); \ | |
6099 | emvn = (NEG); \ | |
6100 | break; \ | |
6101 | } | |
6102 | ||
6103 | unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op); | |
6104 | unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
6105 | unsigned char bytes[16]; | |
6106 | unsigned char elchar = 0; | |
6107 | int immtype = -1, matches; | |
6108 | unsigned int invmask = inverse ? 0xff : 0; | |
6109 | int eshift, emvn; | |
6110 | ||
43e9d192 | 6111 | if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) |
3520f7cc JG |
6112 | { |
6113 | bool simd_imm_zero = aarch64_simd_imm_zero_p (op, mode); | |
6114 | int elem_width = GET_MODE_BITSIZE (GET_MODE (CONST_VECTOR_ELT (op, 0))); | |
6115 | ||
6116 | if (!(simd_imm_zero | |
6117 | || aarch64_vect_float_const_representable_p (op))) | |
6118 | return -1; | |
6119 | ||
6120 | if (modconst) | |
6121 | *modconst = CONST_VECTOR_ELT (op, 0); | |
6122 | ||
6123 | if (elementwidth) | |
6124 | *elementwidth = elem_width; | |
6125 | ||
6126 | if (elementchar) | |
6127 | *elementchar = sizetochar (elem_width); | |
6128 | ||
6129 | if (shift) | |
6130 | *shift = 0; | |
6131 | ||
6132 | if (simd_imm_zero) | |
6133 | return 19; | |
6134 | else | |
6135 | return 18; | |
6136 | } | |
43e9d192 IB |
6137 | |
6138 | /* Splat vector constant out into a byte vector. */ | |
6139 | for (i = 0; i < n_elts; i++) | |
6140 | { | |
6141 | rtx el = CONST_VECTOR_ELT (op, i); | |
6142 | unsigned HOST_WIDE_INT elpart; | |
6143 | unsigned int part, parts; | |
6144 | ||
6145 | if (GET_CODE (el) == CONST_INT) | |
6146 | { | |
6147 | elpart = INTVAL (el); | |
6148 | parts = 1; | |
6149 | } | |
6150 | else if (GET_CODE (el) == CONST_DOUBLE) | |
6151 | { | |
6152 | elpart = CONST_DOUBLE_LOW (el); | |
6153 | parts = 2; | |
6154 | } | |
6155 | else | |
6156 | gcc_unreachable (); | |
6157 | ||
6158 | for (part = 0; part < parts; part++) | |
6159 | { | |
6160 | unsigned int byte; | |
6161 | for (byte = 0; byte < innersize; byte++) | |
6162 | { | |
6163 | bytes[idx++] = (elpart & 0xff) ^ invmask; | |
6164 | elpart >>= BITS_PER_UNIT; | |
6165 | } | |
6166 | if (GET_CODE (el) == CONST_DOUBLE) | |
6167 | elpart = CONST_DOUBLE_HIGH (el); | |
6168 | } | |
6169 | } | |
6170 | ||
6171 | /* Sanity check. */ | |
6172 | gcc_assert (idx == GET_MODE_SIZE (mode)); | |
6173 | ||
6174 | do | |
6175 | { | |
6176 | CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0 | |
6177 | && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0); | |
6178 | ||
6179 | CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1] | |
6180 | && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0); | |
6181 | ||
6182 | CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0 | |
6183 | && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0); | |
6184 | ||
6185 | CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0 | |
6186 | && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0); | |
6187 | ||
6188 | CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0); | |
6189 | ||
6190 | CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0); | |
6191 | ||
6192 | CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff | |
6193 | && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1); | |
6194 | ||
6195 | CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1] | |
6196 | && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1); | |
6197 | ||
6198 | CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff | |
6199 | && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1); | |
6200 | ||
6201 | CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff | |
6202 | && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1); | |
6203 | ||
6204 | CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1); | |
6205 | ||
6206 | CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1); | |
6207 | ||
6208 | CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1] | |
6209 | && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0); | |
6210 | ||
6211 | CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1] | |
6212 | && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1); | |
6213 | ||
6214 | CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff | |
6215 | && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 0, 0); | |
6216 | ||
6217 | CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0 | |
6218 | && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 0, 1); | |
6219 | ||
6220 | CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0); | |
6221 | ||
6222 | CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff) | |
6223 | && bytes[i] == bytes[(i + 8) % idx], 0, 0); | |
6224 | } | |
6225 | while (0); | |
6226 | ||
6227 | /* TODO: Currently the assembler cannot handle types 12 to 15. | |
6228 | And there is no way to specify cmode through the compiler. | |
6229 | Disable them till there is support in the assembler. */ | |
6230 | if (immtype == -1 | |
6231 | || (immtype >= 12 && immtype <= 15) | |
6232 | || immtype == 18) | |
6233 | return -1; | |
6234 | ||
6235 | ||
6236 | if (elementwidth) | |
6237 | *elementwidth = elsize; | |
6238 | ||
6239 | if (elementchar) | |
6240 | *elementchar = elchar; | |
6241 | ||
6242 | if (mvn) | |
6243 | *mvn = emvn; | |
6244 | ||
6245 | if (shift) | |
6246 | *shift = eshift; | |
6247 | ||
6248 | if (modconst) | |
6249 | { | |
6250 | unsigned HOST_WIDE_INT imm = 0; | |
6251 | ||
6252 | /* Un-invert bytes of recognized vector, if necessary. */ | |
6253 | if (invmask != 0) | |
6254 | for (i = 0; i < idx; i++) | |
6255 | bytes[i] ^= invmask; | |
6256 | ||
6257 | if (immtype == 17) | |
6258 | { | |
6259 | /* FIXME: Broken on 32-bit H_W_I hosts. */ | |
6260 | gcc_assert (sizeof (HOST_WIDE_INT) == 8); | |
6261 | ||
6262 | for (i = 0; i < 8; i++) | |
6263 | imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0) | |
6264 | << (i * BITS_PER_UNIT); | |
6265 | ||
6266 | *modconst = GEN_INT (imm); | |
6267 | } | |
6268 | else | |
6269 | { | |
6270 | unsigned HOST_WIDE_INT imm = 0; | |
6271 | ||
6272 | for (i = 0; i < elsize / BITS_PER_UNIT; i++) | |
6273 | imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT); | |
6274 | ||
6275 | /* Construct 'abcdefgh' because the assembler cannot handle | |
6276 | generic constants. */ | |
6277 | gcc_assert (shift != NULL && mvn != NULL); | |
6278 | if (*mvn) | |
6279 | imm = ~imm; | |
6280 | imm = (imm >> *shift) & 0xff; | |
6281 | *modconst = GEN_INT (imm); | |
6282 | } | |
6283 | } | |
6284 | ||
6285 | return immtype; | |
6286 | #undef CHECK | |
6287 | } | |
6288 | ||
6289 | /* Return TRUE if rtx X is legal for use as either a AdvSIMD MOVI instruction | |
6290 | (or, implicitly, MVNI) immediate. Write back width per element | |
3520f7cc JG |
6291 | to *ELEMENTWIDTH, and a modified constant (whatever should be output |
6292 | for a MOVI instruction) in *MODCONST. */ | |
43e9d192 IB |
6293 | int |
6294 | aarch64_simd_immediate_valid_for_move (rtx op, enum machine_mode mode, | |
6295 | rtx *modconst, int *elementwidth, | |
6296 | unsigned char *elementchar, | |
6297 | int *mvn, int *shift) | |
6298 | { | |
6299 | rtx tmpconst; | |
6300 | int tmpwidth; | |
6301 | unsigned char tmpwidthc; | |
6302 | int tmpmvn = 0, tmpshift = 0; | |
6303 | int retval = aarch64_simd_valid_immediate (op, mode, 0, &tmpconst, | |
6304 | &tmpwidth, &tmpwidthc, | |
6305 | &tmpmvn, &tmpshift); | |
6306 | ||
6307 | if (retval == -1) | |
6308 | return 0; | |
6309 | ||
6310 | if (modconst) | |
6311 | *modconst = tmpconst; | |
6312 | ||
6313 | if (elementwidth) | |
6314 | *elementwidth = tmpwidth; | |
6315 | ||
6316 | if (elementchar) | |
6317 | *elementchar = tmpwidthc; | |
6318 | ||
6319 | if (mvn) | |
6320 | *mvn = tmpmvn; | |
6321 | ||
6322 | if (shift) | |
6323 | *shift = tmpshift; | |
6324 | ||
6325 | return 1; | |
6326 | } | |
6327 | ||
6328 | static bool | |
6329 | aarch64_const_vec_all_same_int_p (rtx x, | |
6330 | HOST_WIDE_INT minval, | |
6331 | HOST_WIDE_INT maxval) | |
6332 | { | |
6333 | HOST_WIDE_INT firstval; | |
6334 | int count, i; | |
6335 | ||
6336 | if (GET_CODE (x) != CONST_VECTOR | |
6337 | || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT) | |
6338 | return false; | |
6339 | ||
6340 | firstval = INTVAL (CONST_VECTOR_ELT (x, 0)); | |
6341 | if (firstval < minval || firstval > maxval) | |
6342 | return false; | |
6343 | ||
6344 | count = CONST_VECTOR_NUNITS (x); | |
6345 | for (i = 1; i < count; i++) | |
6346 | if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval) | |
6347 | return false; | |
6348 | ||
6349 | return true; | |
6350 | } | |
6351 | ||
6352 | /* Check of immediate shift constants are within range. */ | |
6353 | bool | |
6354 | aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left) | |
6355 | { | |
6356 | int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT; | |
6357 | if (left) | |
6358 | return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1); | |
6359 | else | |
6360 | return aarch64_const_vec_all_same_int_p (x, 1, bit_width); | |
6361 | } | |
6362 | ||
3520f7cc JG |
6363 | /* Return true if X is a uniform vector where all elements |
6364 | are either the floating-point constant 0.0 or the | |
6365 | integer constant 0. */ | |
43e9d192 IB |
6366 | bool |
6367 | aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode) | |
6368 | { | |
3520f7cc | 6369 | return x == CONST0_RTX (mode); |
43e9d192 IB |
6370 | } |
6371 | ||
6372 | bool | |
6373 | aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED) | |
6374 | { | |
6375 | HOST_WIDE_INT imm = INTVAL (x); | |
6376 | int i; | |
6377 | ||
6378 | for (i = 0; i < 8; i++) | |
6379 | { | |
6380 | unsigned int byte = imm & 0xff; | |
6381 | if (byte != 0xff && byte != 0) | |
6382 | return false; | |
6383 | imm >>= 8; | |
6384 | } | |
6385 | ||
6386 | return true; | |
6387 | } | |
6388 | ||
6389 | /* Return a const_int vector of VAL. */ | |
6390 | rtx | |
6391 | aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val) | |
6392 | { | |
6393 | int nunits = GET_MODE_NUNITS (mode); | |
6394 | rtvec v = rtvec_alloc (nunits); | |
6395 | int i; | |
6396 | ||
6397 | for (i=0; i < nunits; i++) | |
6398 | RTVEC_ELT (v, i) = GEN_INT (val); | |
6399 | ||
6400 | return gen_rtx_CONST_VECTOR (mode, v); | |
6401 | } | |
6402 | ||
6403 | /* Construct and return a PARALLEL RTX vector. */ | |
6404 | rtx | |
6405 | aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high) | |
6406 | { | |
6407 | int nunits = GET_MODE_NUNITS (mode); | |
6408 | rtvec v = rtvec_alloc (nunits / 2); | |
6409 | int base = high ? nunits / 2 : 0; | |
6410 | rtx t1; | |
6411 | int i; | |
6412 | ||
6413 | for (i=0; i < nunits / 2; i++) | |
6414 | RTVEC_ELT (v, i) = GEN_INT (base + i); | |
6415 | ||
6416 | t1 = gen_rtx_PARALLEL (mode, v); | |
6417 | return t1; | |
6418 | } | |
6419 | ||
6420 | /* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and | |
6421 | HIGH (exclusive). */ | |
6422 | void | |
6423 | aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high) | |
6424 | { | |
6425 | HOST_WIDE_INT lane; | |
6426 | gcc_assert (GET_CODE (operand) == CONST_INT); | |
6427 | lane = INTVAL (operand); | |
6428 | ||
6429 | if (lane < low || lane >= high) | |
6430 | error ("lane out of range"); | |
6431 | } | |
6432 | ||
6433 | void | |
6434 | aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high) | |
6435 | { | |
6436 | gcc_assert (GET_CODE (operand) == CONST_INT); | |
6437 | HOST_WIDE_INT lane = INTVAL (operand); | |
6438 | ||
6439 | if (lane < low || lane >= high) | |
6440 | error ("constant out of range"); | |
6441 | } | |
6442 | ||
6443 | /* Emit code to reinterpret one AdvSIMD type as another, | |
6444 | without altering bits. */ | |
6445 | void | |
6446 | aarch64_simd_reinterpret (rtx dest, rtx src) | |
6447 | { | |
6448 | emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src)); | |
6449 | } | |
6450 | ||
6451 | /* Emit code to place a AdvSIMD pair result in memory locations (with equal | |
6452 | registers). */ | |
6453 | void | |
6454 | aarch64_simd_emit_pair_result_insn (enum machine_mode mode, | |
6455 | rtx (*intfn) (rtx, rtx, rtx), rtx destaddr, | |
6456 | rtx op1) | |
6457 | { | |
6458 | rtx mem = gen_rtx_MEM (mode, destaddr); | |
6459 | rtx tmp1 = gen_reg_rtx (mode); | |
6460 | rtx tmp2 = gen_reg_rtx (mode); | |
6461 | ||
6462 | emit_insn (intfn (tmp1, op1, tmp2)); | |
6463 | ||
6464 | emit_move_insn (mem, tmp1); | |
6465 | mem = adjust_address (mem, mode, GET_MODE_SIZE (mode)); | |
6466 | emit_move_insn (mem, tmp2); | |
6467 | } | |
6468 | ||
6469 | /* Return TRUE if OP is a valid vector addressing mode. */ | |
6470 | bool | |
6471 | aarch64_simd_mem_operand_p (rtx op) | |
6472 | { | |
6473 | return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC | |
6474 | || GET_CODE (XEXP (op, 0)) == REG); | |
6475 | } | |
6476 | ||
6477 | /* Set up OPERANDS for a register copy from SRC to DEST, taking care | |
6478 | not to early-clobber SRC registers in the process. | |
6479 | ||
6480 | We assume that the operands described by SRC and DEST represent a | |
6481 | decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the | |
6482 | number of components into which the copy has been decomposed. */ | |
6483 | void | |
6484 | aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest, | |
6485 | rtx *src, unsigned int count) | |
6486 | { | |
6487 | unsigned int i; | |
6488 | ||
6489 | if (!reg_overlap_mentioned_p (operands[0], operands[1]) | |
6490 | || REGNO (operands[0]) < REGNO (operands[1])) | |
6491 | { | |
6492 | for (i = 0; i < count; i++) | |
6493 | { | |
6494 | operands[2 * i] = dest[i]; | |
6495 | operands[2 * i + 1] = src[i]; | |
6496 | } | |
6497 | } | |
6498 | else | |
6499 | { | |
6500 | for (i = 0; i < count; i++) | |
6501 | { | |
6502 | operands[2 * i] = dest[count - i - 1]; | |
6503 | operands[2 * i + 1] = src[count - i - 1]; | |
6504 | } | |
6505 | } | |
6506 | } | |
6507 | ||
6508 | /* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is | |
6509 | one of VSTRUCT modes: OI, CI or XI. */ | |
6510 | int | |
6511 | aarch64_simd_attr_length_move (rtx insn) | |
6512 | { | |
43e9d192 IB |
6513 | enum machine_mode mode; |
6514 | ||
6515 | extract_insn_cached (insn); | |
6516 | ||
6517 | if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1])) | |
6518 | { | |
6519 | mode = GET_MODE (recog_data.operand[0]); | |
6520 | switch (mode) | |
6521 | { | |
6522 | case OImode: | |
6523 | return 8; | |
6524 | case CImode: | |
6525 | return 12; | |
6526 | case XImode: | |
6527 | return 16; | |
6528 | default: | |
6529 | gcc_unreachable (); | |
6530 | } | |
6531 | } | |
6532 | return 4; | |
6533 | } | |
6534 | ||
db0253a4 TB |
6535 | /* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum |
6536 | alignment of a vector to 128 bits. */ | |
6537 | static HOST_WIDE_INT | |
6538 | aarch64_simd_vector_alignment (const_tree type) | |
6539 | { | |
6540 | HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0); | |
6541 | return MIN (align, 128); | |
6542 | } | |
6543 | ||
6544 | /* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */ | |
6545 | static bool | |
6546 | aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed) | |
6547 | { | |
6548 | if (is_packed) | |
6549 | return false; | |
6550 | ||
6551 | /* We guarantee alignment for vectors up to 128-bits. */ | |
6552 | if (tree_int_cst_compare (TYPE_SIZE (type), | |
6553 | bitsize_int (BIGGEST_ALIGNMENT)) > 0) | |
6554 | return false; | |
6555 | ||
6556 | /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */ | |
6557 | return true; | |
6558 | } | |
6559 | ||
4369c11e TB |
6560 | /* If VALS is a vector constant that can be loaded into a register |
6561 | using DUP, generate instructions to do so and return an RTX to | |
6562 | assign to the register. Otherwise return NULL_RTX. */ | |
6563 | static rtx | |
6564 | aarch64_simd_dup_constant (rtx vals) | |
6565 | { | |
6566 | enum machine_mode mode = GET_MODE (vals); | |
6567 | enum machine_mode inner_mode = GET_MODE_INNER (mode); | |
6568 | int n_elts = GET_MODE_NUNITS (mode); | |
6569 | bool all_same = true; | |
6570 | rtx x; | |
6571 | int i; | |
6572 | ||
6573 | if (GET_CODE (vals) != CONST_VECTOR) | |
6574 | return NULL_RTX; | |
6575 | ||
6576 | for (i = 1; i < n_elts; ++i) | |
6577 | { | |
6578 | x = CONST_VECTOR_ELT (vals, i); | |
6579 | if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0))) | |
6580 | all_same = false; | |
6581 | } | |
6582 | ||
6583 | if (!all_same) | |
6584 | return NULL_RTX; | |
6585 | ||
6586 | /* We can load this constant by using DUP and a constant in a | |
6587 | single ARM register. This will be cheaper than a vector | |
6588 | load. */ | |
6589 | x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0)); | |
6590 | return gen_rtx_VEC_DUPLICATE (mode, x); | |
6591 | } | |
6592 | ||
6593 | ||
6594 | /* Generate code to load VALS, which is a PARALLEL containing only | |
6595 | constants (for vec_init) or CONST_VECTOR, efficiently into a | |
6596 | register. Returns an RTX to copy into the register, or NULL_RTX | |
6597 | for a PARALLEL that can not be converted into a CONST_VECTOR. */ | |
1df3f464 | 6598 | static rtx |
4369c11e TB |
6599 | aarch64_simd_make_constant (rtx vals) |
6600 | { | |
6601 | enum machine_mode mode = GET_MODE (vals); | |
6602 | rtx const_dup; | |
6603 | rtx const_vec = NULL_RTX; | |
6604 | int n_elts = GET_MODE_NUNITS (mode); | |
6605 | int n_const = 0; | |
6606 | int i; | |
6607 | ||
6608 | if (GET_CODE (vals) == CONST_VECTOR) | |
6609 | const_vec = vals; | |
6610 | else if (GET_CODE (vals) == PARALLEL) | |
6611 | { | |
6612 | /* A CONST_VECTOR must contain only CONST_INTs and | |
6613 | CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF). | |
6614 | Only store valid constants in a CONST_VECTOR. */ | |
6615 | for (i = 0; i < n_elts; ++i) | |
6616 | { | |
6617 | rtx x = XVECEXP (vals, 0, i); | |
6618 | if (CONST_INT_P (x) || CONST_DOUBLE_P (x)) | |
6619 | n_const++; | |
6620 | } | |
6621 | if (n_const == n_elts) | |
6622 | const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)); | |
6623 | } | |
6624 | else | |
6625 | gcc_unreachable (); | |
6626 | ||
6627 | if (const_vec != NULL_RTX | |
6628 | && aarch64_simd_immediate_valid_for_move (const_vec, mode, NULL, NULL, | |
6629 | NULL, NULL, NULL)) | |
6630 | /* Load using MOVI/MVNI. */ | |
6631 | return const_vec; | |
6632 | else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX) | |
6633 | /* Loaded using DUP. */ | |
6634 | return const_dup; | |
6635 | else if (const_vec != NULL_RTX) | |
6636 | /* Load from constant pool. We can not take advantage of single-cycle | |
6637 | LD1 because we need a PC-relative addressing mode. */ | |
6638 | return const_vec; | |
6639 | else | |
6640 | /* A PARALLEL containing something not valid inside CONST_VECTOR. | |
6641 | We can not construct an initializer. */ | |
6642 | return NULL_RTX; | |
6643 | } | |
6644 | ||
6645 | void | |
6646 | aarch64_expand_vector_init (rtx target, rtx vals) | |
6647 | { | |
6648 | enum machine_mode mode = GET_MODE (target); | |
6649 | enum machine_mode inner_mode = GET_MODE_INNER (mode); | |
6650 | int n_elts = GET_MODE_NUNITS (mode); | |
6651 | int n_var = 0, one_var = -1; | |
6652 | bool all_same = true; | |
6653 | rtx x, mem; | |
6654 | int i; | |
6655 | ||
6656 | x = XVECEXP (vals, 0, 0); | |
6657 | if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x)) | |
6658 | n_var = 1, one_var = 0; | |
6659 | ||
6660 | for (i = 1; i < n_elts; ++i) | |
6661 | { | |
6662 | x = XVECEXP (vals, 0, i); | |
6663 | if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x)) | |
6664 | ++n_var, one_var = i; | |
6665 | ||
6666 | if (!rtx_equal_p (x, XVECEXP (vals, 0, 0))) | |
6667 | all_same = false; | |
6668 | } | |
6669 | ||
6670 | if (n_var == 0) | |
6671 | { | |
6672 | rtx constant = aarch64_simd_make_constant (vals); | |
6673 | if (constant != NULL_RTX) | |
6674 | { | |
6675 | emit_move_insn (target, constant); | |
6676 | return; | |
6677 | } | |
6678 | } | |
6679 | ||
6680 | /* Splat a single non-constant element if we can. */ | |
6681 | if (all_same) | |
6682 | { | |
6683 | x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0)); | |
6684 | aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x)); | |
6685 | return; | |
6686 | } | |
6687 | ||
6688 | /* One field is non-constant. Load constant then overwrite varying | |
6689 | field. This is more efficient than using the stack. */ | |
6690 | if (n_var == 1) | |
6691 | { | |
6692 | rtx copy = copy_rtx (vals); | |
6693 | rtx index = GEN_INT (one_var); | |
6694 | enum insn_code icode; | |
6695 | ||
6696 | /* Load constant part of vector, substitute neighboring value for | |
6697 | varying element. */ | |
6698 | XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1); | |
6699 | aarch64_expand_vector_init (target, copy); | |
6700 | ||
6701 | /* Insert variable. */ | |
6702 | x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var)); | |
6703 | icode = optab_handler (vec_set_optab, mode); | |
6704 | gcc_assert (icode != CODE_FOR_nothing); | |
6705 | emit_insn (GEN_FCN (icode) (target, x, index)); | |
6706 | return; | |
6707 | } | |
6708 | ||
6709 | /* Construct the vector in memory one field at a time | |
6710 | and load the whole vector. */ | |
6711 | mem = assign_stack_temp (mode, GET_MODE_SIZE (mode)); | |
6712 | for (i = 0; i < n_elts; i++) | |
6713 | emit_move_insn (adjust_address_nv (mem, inner_mode, | |
6714 | i * GET_MODE_SIZE (inner_mode)), | |
6715 | XVECEXP (vals, 0, i)); | |
6716 | emit_move_insn (target, mem); | |
6717 | ||
6718 | } | |
6719 | ||
43e9d192 IB |
6720 | static unsigned HOST_WIDE_INT |
6721 | aarch64_shift_truncation_mask (enum machine_mode mode) | |
6722 | { | |
6723 | return | |
6724 | (aarch64_vector_mode_supported_p (mode) | |
6725 | || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1); | |
6726 | } | |
6727 | ||
6728 | #ifndef TLS_SECTION_ASM_FLAG | |
6729 | #define TLS_SECTION_ASM_FLAG 'T' | |
6730 | #endif | |
6731 | ||
6732 | void | |
6733 | aarch64_elf_asm_named_section (const char *name, unsigned int flags, | |
6734 | tree decl ATTRIBUTE_UNUSED) | |
6735 | { | |
6736 | char flagchars[10], *f = flagchars; | |
6737 | ||
6738 | /* If we have already declared this section, we can use an | |
6739 | abbreviated form to switch back to it -- unless this section is | |
6740 | part of a COMDAT groups, in which case GAS requires the full | |
6741 | declaration every time. */ | |
6742 | if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) | |
6743 | && (flags & SECTION_DECLARED)) | |
6744 | { | |
6745 | fprintf (asm_out_file, "\t.section\t%s\n", name); | |
6746 | return; | |
6747 | } | |
6748 | ||
6749 | if (!(flags & SECTION_DEBUG)) | |
6750 | *f++ = 'a'; | |
6751 | if (flags & SECTION_WRITE) | |
6752 | *f++ = 'w'; | |
6753 | if (flags & SECTION_CODE) | |
6754 | *f++ = 'x'; | |
6755 | if (flags & SECTION_SMALL) | |
6756 | *f++ = 's'; | |
6757 | if (flags & SECTION_MERGE) | |
6758 | *f++ = 'M'; | |
6759 | if (flags & SECTION_STRINGS) | |
6760 | *f++ = 'S'; | |
6761 | if (flags & SECTION_TLS) | |
6762 | *f++ = TLS_SECTION_ASM_FLAG; | |
6763 | if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) | |
6764 | *f++ = 'G'; | |
6765 | *f = '\0'; | |
6766 | ||
6767 | fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars); | |
6768 | ||
6769 | if (!(flags & SECTION_NOTYPE)) | |
6770 | { | |
6771 | const char *type; | |
6772 | const char *format; | |
6773 | ||
6774 | if (flags & SECTION_BSS) | |
6775 | type = "nobits"; | |
6776 | else | |
6777 | type = "progbits"; | |
6778 | ||
6779 | #ifdef TYPE_OPERAND_FMT | |
6780 | format = "," TYPE_OPERAND_FMT; | |
6781 | #else | |
6782 | format = ",@%s"; | |
6783 | #endif | |
6784 | ||
6785 | fprintf (asm_out_file, format, type); | |
6786 | ||
6787 | if (flags & SECTION_ENTSIZE) | |
6788 | fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE); | |
6789 | if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) | |
6790 | { | |
6791 | if (TREE_CODE (decl) == IDENTIFIER_NODE) | |
6792 | fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl)); | |
6793 | else | |
6794 | fprintf (asm_out_file, ",%s,comdat", | |
6795 | IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl))); | |
6796 | } | |
6797 | } | |
6798 | ||
6799 | putc ('\n', asm_out_file); | |
6800 | } | |
6801 | ||
6802 | /* Select a format to encode pointers in exception handling data. */ | |
6803 | int | |
6804 | aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global) | |
6805 | { | |
6806 | int type; | |
6807 | switch (aarch64_cmodel) | |
6808 | { | |
6809 | case AARCH64_CMODEL_TINY: | |
6810 | case AARCH64_CMODEL_TINY_PIC: | |
6811 | case AARCH64_CMODEL_SMALL: | |
6812 | case AARCH64_CMODEL_SMALL_PIC: | |
6813 | /* text+got+data < 4Gb. 4-byte signed relocs are sufficient | |
6814 | for everything. */ | |
6815 | type = DW_EH_PE_sdata4; | |
6816 | break; | |
6817 | default: | |
6818 | /* No assumptions here. 8-byte relocs required. */ | |
6819 | type = DW_EH_PE_sdata8; | |
6820 | break; | |
6821 | } | |
6822 | return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type; | |
6823 | } | |
6824 | ||
0462169c SN |
6825 | /* Emit load exclusive. */ |
6826 | ||
6827 | static void | |
6828 | aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval, | |
6829 | rtx mem, rtx model_rtx) | |
6830 | { | |
6831 | rtx (*gen) (rtx, rtx, rtx); | |
6832 | ||
6833 | switch (mode) | |
6834 | { | |
6835 | case QImode: gen = gen_aarch64_load_exclusiveqi; break; | |
6836 | case HImode: gen = gen_aarch64_load_exclusivehi; break; | |
6837 | case SImode: gen = gen_aarch64_load_exclusivesi; break; | |
6838 | case DImode: gen = gen_aarch64_load_exclusivedi; break; | |
6839 | default: | |
6840 | gcc_unreachable (); | |
6841 | } | |
6842 | ||
6843 | emit_insn (gen (rval, mem, model_rtx)); | |
6844 | } | |
6845 | ||
6846 | /* Emit store exclusive. */ | |
6847 | ||
6848 | static void | |
6849 | aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval, | |
6850 | rtx rval, rtx mem, rtx model_rtx) | |
6851 | { | |
6852 | rtx (*gen) (rtx, rtx, rtx, rtx); | |
6853 | ||
6854 | switch (mode) | |
6855 | { | |
6856 | case QImode: gen = gen_aarch64_store_exclusiveqi; break; | |
6857 | case HImode: gen = gen_aarch64_store_exclusivehi; break; | |
6858 | case SImode: gen = gen_aarch64_store_exclusivesi; break; | |
6859 | case DImode: gen = gen_aarch64_store_exclusivedi; break; | |
6860 | default: | |
6861 | gcc_unreachable (); | |
6862 | } | |
6863 | ||
6864 | emit_insn (gen (bval, rval, mem, model_rtx)); | |
6865 | } | |
6866 | ||
6867 | /* Mark the previous jump instruction as unlikely. */ | |
6868 | ||
6869 | static void | |
6870 | aarch64_emit_unlikely_jump (rtx insn) | |
6871 | { | |
6872 | rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1); | |
6873 | ||
6874 | insn = emit_jump_insn (insn); | |
6875 | add_reg_note (insn, REG_BR_PROB, very_unlikely); | |
6876 | } | |
6877 | ||
6878 | /* Expand a compare and swap pattern. */ | |
6879 | ||
6880 | void | |
6881 | aarch64_expand_compare_and_swap (rtx operands[]) | |
6882 | { | |
6883 | rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x; | |
6884 | enum machine_mode mode, cmp_mode; | |
6885 | rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx); | |
6886 | ||
6887 | bval = operands[0]; | |
6888 | rval = operands[1]; | |
6889 | mem = operands[2]; | |
6890 | oldval = operands[3]; | |
6891 | newval = operands[4]; | |
6892 | is_weak = operands[5]; | |
6893 | mod_s = operands[6]; | |
6894 | mod_f = operands[7]; | |
6895 | mode = GET_MODE (mem); | |
6896 | cmp_mode = mode; | |
6897 | ||
6898 | /* Normally the succ memory model must be stronger than fail, but in the | |
6899 | unlikely event of fail being ACQUIRE and succ being RELEASE we need to | |
6900 | promote succ to ACQ_REL so that we don't lose the acquire semantics. */ | |
6901 | ||
6902 | if (INTVAL (mod_f) == MEMMODEL_ACQUIRE | |
6903 | && INTVAL (mod_s) == MEMMODEL_RELEASE) | |
6904 | mod_s = GEN_INT (MEMMODEL_ACQ_REL); | |
6905 | ||
6906 | switch (mode) | |
6907 | { | |
6908 | case QImode: | |
6909 | case HImode: | |
6910 | /* For short modes, we're going to perform the comparison in SImode, | |
6911 | so do the zero-extension now. */ | |
6912 | cmp_mode = SImode; | |
6913 | rval = gen_reg_rtx (SImode); | |
6914 | oldval = convert_modes (SImode, mode, oldval, true); | |
6915 | /* Fall through. */ | |
6916 | ||
6917 | case SImode: | |
6918 | case DImode: | |
6919 | /* Force the value into a register if needed. */ | |
6920 | if (!aarch64_plus_operand (oldval, mode)) | |
6921 | oldval = force_reg (cmp_mode, oldval); | |
6922 | break; | |
6923 | ||
6924 | default: | |
6925 | gcc_unreachable (); | |
6926 | } | |
6927 | ||
6928 | switch (mode) | |
6929 | { | |
6930 | case QImode: gen = gen_atomic_compare_and_swapqi_1; break; | |
6931 | case HImode: gen = gen_atomic_compare_and_swaphi_1; break; | |
6932 | case SImode: gen = gen_atomic_compare_and_swapsi_1; break; | |
6933 | case DImode: gen = gen_atomic_compare_and_swapdi_1; break; | |
6934 | default: | |
6935 | gcc_unreachable (); | |
6936 | } | |
6937 | ||
6938 | emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f)); | |
6939 | ||
6940 | if (mode == QImode || mode == HImode) | |
6941 | emit_move_insn (operands[1], gen_lowpart (mode, rval)); | |
6942 | ||
6943 | x = gen_rtx_REG (CCmode, CC_REGNUM); | |
6944 | x = gen_rtx_EQ (SImode, x, const0_rtx); | |
6945 | emit_insn (gen_rtx_SET (VOIDmode, bval, x)); | |
6946 | } | |
6947 | ||
6948 | /* Split a compare and swap pattern. */ | |
6949 | ||
6950 | void | |
6951 | aarch64_split_compare_and_swap (rtx operands[]) | |
6952 | { | |
6953 | rtx rval, mem, oldval, newval, scratch; | |
6954 | enum machine_mode mode; | |
0462169c SN |
6955 | bool is_weak; |
6956 | rtx label1, label2, x, cond; | |
6957 | ||
6958 | rval = operands[0]; | |
6959 | mem = operands[1]; | |
6960 | oldval = operands[2]; | |
6961 | newval = operands[3]; | |
6962 | is_weak = (operands[4] != const0_rtx); | |
0462169c SN |
6963 | scratch = operands[7]; |
6964 | mode = GET_MODE (mem); | |
6965 | ||
6966 | label1 = NULL_RTX; | |
6967 | if (!is_weak) | |
6968 | { | |
6969 | label1 = gen_label_rtx (); | |
6970 | emit_label (label1); | |
6971 | } | |
6972 | label2 = gen_label_rtx (); | |
6973 | ||
6974 | aarch64_emit_load_exclusive (mode, rval, mem, operands[5]); | |
6975 | ||
6976 | cond = aarch64_gen_compare_reg (NE, rval, oldval); | |
6977 | x = gen_rtx_NE (VOIDmode, cond, const0_rtx); | |
6978 | x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, | |
6979 | gen_rtx_LABEL_REF (Pmode, label2), pc_rtx); | |
6980 | aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); | |
6981 | ||
6982 | aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]); | |
6983 | ||
6984 | if (!is_weak) | |
6985 | { | |
6986 | x = gen_rtx_NE (VOIDmode, scratch, const0_rtx); | |
6987 | x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, | |
6988 | gen_rtx_LABEL_REF (Pmode, label1), pc_rtx); | |
6989 | aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); | |
6990 | } | |
6991 | else | |
6992 | { | |
6993 | cond = gen_rtx_REG (CCmode, CC_REGNUM); | |
6994 | x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx); | |
6995 | emit_insn (gen_rtx_SET (VOIDmode, cond, x)); | |
6996 | } | |
6997 | ||
6998 | emit_label (label2); | |
6999 | } | |
7000 | ||
7001 | /* Split an atomic operation. */ | |
7002 | ||
7003 | void | |
7004 | aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem, | |
7005 | rtx value, rtx model_rtx, rtx cond) | |
7006 | { | |
7007 | enum machine_mode mode = GET_MODE (mem); | |
7008 | enum machine_mode wmode = (mode == DImode ? DImode : SImode); | |
7009 | rtx label, x; | |
7010 | ||
7011 | label = gen_label_rtx (); | |
7012 | emit_label (label); | |
7013 | ||
7014 | if (new_out) | |
7015 | new_out = gen_lowpart (wmode, new_out); | |
7016 | if (old_out) | |
7017 | old_out = gen_lowpart (wmode, old_out); | |
7018 | else | |
7019 | old_out = new_out; | |
7020 | value = simplify_gen_subreg (wmode, value, mode, 0); | |
7021 | ||
7022 | aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx); | |
7023 | ||
7024 | switch (code) | |
7025 | { | |
7026 | case SET: | |
7027 | new_out = value; | |
7028 | break; | |
7029 | ||
7030 | case NOT: | |
7031 | x = gen_rtx_AND (wmode, old_out, value); | |
7032 | emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); | |
7033 | x = gen_rtx_NOT (wmode, new_out); | |
7034 | emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); | |
7035 | break; | |
7036 | ||
7037 | case MINUS: | |
7038 | if (CONST_INT_P (value)) | |
7039 | { | |
7040 | value = GEN_INT (-INTVAL (value)); | |
7041 | code = PLUS; | |
7042 | } | |
7043 | /* Fall through. */ | |
7044 | ||
7045 | default: | |
7046 | x = gen_rtx_fmt_ee (code, wmode, old_out, value); | |
7047 | emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); | |
7048 | break; | |
7049 | } | |
7050 | ||
7051 | aarch64_emit_store_exclusive (mode, cond, mem, | |
7052 | gen_lowpart (mode, new_out), model_rtx); | |
7053 | ||
7054 | x = gen_rtx_NE (VOIDmode, cond, const0_rtx); | |
7055 | x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, | |
7056 | gen_rtx_LABEL_REF (Pmode, label), pc_rtx); | |
7057 | aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); | |
7058 | } | |
7059 | ||
43e9d192 IB |
7060 | static void |
7061 | aarch64_start_file (void) | |
7062 | { | |
7063 | if (selected_arch) | |
7064 | asm_fprintf (asm_out_file, "\t.arch %s\n", selected_arch->name); | |
7065 | else if (selected_cpu) | |
7066 | asm_fprintf (asm_out_file, "\t.cpu %s\n", selected_cpu->name); | |
7067 | default_file_start(); | |
7068 | } | |
7069 | ||
7070 | /* Target hook for c_mode_for_suffix. */ | |
7071 | static enum machine_mode | |
7072 | aarch64_c_mode_for_suffix (char suffix) | |
7073 | { | |
7074 | if (suffix == 'q') | |
7075 | return TFmode; | |
7076 | ||
7077 | return VOIDmode; | |
7078 | } | |
7079 | ||
3520f7cc JG |
7080 | /* We can only represent floating point constants which will fit in |
7081 | "quarter-precision" values. These values are characterised by | |
7082 | a sign bit, a 4-bit mantissa and a 3-bit exponent. And are given | |
7083 | by: | |
7084 | ||
7085 | (-1)^s * (n/16) * 2^r | |
7086 | ||
7087 | Where: | |
7088 | 's' is the sign bit. | |
7089 | 'n' is an integer in the range 16 <= n <= 31. | |
7090 | 'r' is an integer in the range -3 <= r <= 4. */ | |
7091 | ||
7092 | /* Return true iff X can be represented by a quarter-precision | |
7093 | floating point immediate operand X. Note, we cannot represent 0.0. */ | |
7094 | bool | |
7095 | aarch64_float_const_representable_p (rtx x) | |
7096 | { | |
7097 | /* This represents our current view of how many bits | |
7098 | make up the mantissa. */ | |
7099 | int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; | |
ba96cdfb | 7100 | int exponent; |
3520f7cc JG |
7101 | unsigned HOST_WIDE_INT mantissa, mask; |
7102 | HOST_WIDE_INT m1, m2; | |
7103 | REAL_VALUE_TYPE r, m; | |
7104 | ||
7105 | if (!CONST_DOUBLE_P (x)) | |
7106 | return false; | |
7107 | ||
7108 | REAL_VALUE_FROM_CONST_DOUBLE (r, x); | |
7109 | ||
7110 | /* We cannot represent infinities, NaNs or +/-zero. We won't | |
7111 | know if we have +zero until we analyse the mantissa, but we | |
7112 | can reject the other invalid values. */ | |
7113 | if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) | |
7114 | || REAL_VALUE_MINUS_ZERO (r)) | |
7115 | return false; | |
7116 | ||
ba96cdfb | 7117 | /* Extract exponent. */ |
3520f7cc JG |
7118 | r = real_value_abs (&r); |
7119 | exponent = REAL_EXP (&r); | |
7120 | ||
7121 | /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the | |
7122 | highest (sign) bit, with a fixed binary point at bit point_pos. | |
7123 | m1 holds the low part of the mantissa, m2 the high part. | |
7124 | WARNING: If we ever have a representation using more than 2 * H_W_I - 1 | |
7125 | bits for the mantissa, this can fail (low bits will be lost). */ | |
7126 | real_ldexp (&m, &r, point_pos - exponent); | |
7127 | REAL_VALUE_TO_INT (&m1, &m2, m); | |
7128 | ||
7129 | /* If the low part of the mantissa has bits set we cannot represent | |
7130 | the value. */ | |
7131 | if (m1 != 0) | |
7132 | return false; | |
7133 | /* We have rejected the lower HOST_WIDE_INT, so update our | |
7134 | understanding of how many bits lie in the mantissa and | |
7135 | look only at the high HOST_WIDE_INT. */ | |
7136 | mantissa = m2; | |
7137 | point_pos -= HOST_BITS_PER_WIDE_INT; | |
7138 | ||
7139 | /* We can only represent values with a mantissa of the form 1.xxxx. */ | |
7140 | mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1; | |
7141 | if ((mantissa & mask) != 0) | |
7142 | return false; | |
7143 | ||
7144 | /* Having filtered unrepresentable values, we may now remove all | |
7145 | but the highest 5 bits. */ | |
7146 | mantissa >>= point_pos - 5; | |
7147 | ||
7148 | /* We cannot represent the value 0.0, so reject it. This is handled | |
7149 | elsewhere. */ | |
7150 | if (mantissa == 0) | |
7151 | return false; | |
7152 | ||
7153 | /* Then, as bit 4 is always set, we can mask it off, leaving | |
7154 | the mantissa in the range [0, 15]. */ | |
7155 | mantissa &= ~(1 << 4); | |
7156 | gcc_assert (mantissa <= 15); | |
7157 | ||
7158 | /* GCC internally does not use IEEE754-like encoding (where normalized | |
7159 | significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c). | |
7160 | Our mantissa values are shifted 4 places to the left relative to | |
7161 | normalized IEEE754 so we must modify the exponent returned by REAL_EXP | |
7162 | by 5 places to correct for GCC's representation. */ | |
7163 | exponent = 5 - exponent; | |
7164 | ||
7165 | return (exponent >= 0 && exponent <= 7); | |
7166 | } | |
7167 | ||
7168 | char* | |
7169 | aarch64_output_simd_mov_immediate (rtx *const_vector, | |
7170 | enum machine_mode mode, | |
7171 | unsigned width) | |
7172 | { | |
7173 | int is_valid; | |
7174 | unsigned char widthc; | |
7175 | int lane_width_bits; | |
7176 | static char templ[40]; | |
7177 | int shift = 0, mvn = 0; | |
7178 | const char *mnemonic; | |
7179 | unsigned int lane_count = 0; | |
7180 | ||
7181 | is_valid = | |
7182 | aarch64_simd_immediate_valid_for_move (*const_vector, mode, | |
7183 | const_vector, &lane_width_bits, | |
7184 | &widthc, &mvn, &shift); | |
7185 | gcc_assert (is_valid); | |
7186 | ||
7187 | mode = GET_MODE_INNER (mode); | |
7188 | if (mode == SFmode || mode == DFmode) | |
7189 | { | |
7190 | bool zero_p = | |
7191 | aarch64_float_const_zero_rtx_p (*const_vector); | |
7192 | gcc_assert (shift == 0); | |
7193 | mnemonic = zero_p ? "movi" : "fmov"; | |
7194 | } | |
7195 | else | |
7196 | mnemonic = mvn ? "mvni" : "movi"; | |
7197 | ||
7198 | gcc_assert (lane_width_bits != 0); | |
7199 | lane_count = width / lane_width_bits; | |
7200 | ||
7201 | if (lane_count == 1) | |
7202 | snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic); | |
7203 | else if (shift) | |
7204 | snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d", | |
7205 | mnemonic, lane_count, widthc, shift); | |
7206 | else | |
7207 | snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1", | |
7208 | mnemonic, lane_count, widthc); | |
7209 | return templ; | |
7210 | } | |
7211 | ||
88b08073 JG |
7212 | /* Split operands into moves from op[1] + op[2] into op[0]. */ |
7213 | ||
7214 | void | |
7215 | aarch64_split_combinev16qi (rtx operands[3]) | |
7216 | { | |
7217 | unsigned int dest = REGNO (operands[0]); | |
7218 | unsigned int src1 = REGNO (operands[1]); | |
7219 | unsigned int src2 = REGNO (operands[2]); | |
7220 | enum machine_mode halfmode = GET_MODE (operands[1]); | |
7221 | unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode); | |
7222 | rtx destlo, desthi; | |
7223 | ||
7224 | gcc_assert (halfmode == V16QImode); | |
7225 | ||
7226 | if (src1 == dest && src2 == dest + halfregs) | |
7227 | { | |
7228 | /* No-op move. Can't split to nothing; emit something. */ | |
7229 | emit_note (NOTE_INSN_DELETED); | |
7230 | return; | |
7231 | } | |
7232 | ||
7233 | /* Preserve register attributes for variable tracking. */ | |
7234 | destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0); | |
7235 | desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs, | |
7236 | GET_MODE_SIZE (halfmode)); | |
7237 | ||
7238 | /* Special case of reversed high/low parts. */ | |
7239 | if (reg_overlap_mentioned_p (operands[2], destlo) | |
7240 | && reg_overlap_mentioned_p (operands[1], desthi)) | |
7241 | { | |
7242 | emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2])); | |
7243 | emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2])); | |
7244 | emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2])); | |
7245 | } | |
7246 | else if (!reg_overlap_mentioned_p (operands[2], destlo)) | |
7247 | { | |
7248 | /* Try to avoid unnecessary moves if part of the result | |
7249 | is in the right place already. */ | |
7250 | if (src1 != dest) | |
7251 | emit_move_insn (destlo, operands[1]); | |
7252 | if (src2 != dest + halfregs) | |
7253 | emit_move_insn (desthi, operands[2]); | |
7254 | } | |
7255 | else | |
7256 | { | |
7257 | if (src2 != dest + halfregs) | |
7258 | emit_move_insn (desthi, operands[2]); | |
7259 | if (src1 != dest) | |
7260 | emit_move_insn (destlo, operands[1]); | |
7261 | } | |
7262 | } | |
7263 | ||
7264 | /* vec_perm support. */ | |
7265 | ||
7266 | #define MAX_VECT_LEN 16 | |
7267 | ||
7268 | struct expand_vec_perm_d | |
7269 | { | |
7270 | rtx target, op0, op1; | |
7271 | unsigned char perm[MAX_VECT_LEN]; | |
7272 | enum machine_mode vmode; | |
7273 | unsigned char nelt; | |
7274 | bool one_vector_p; | |
7275 | bool testing_p; | |
7276 | }; | |
7277 | ||
7278 | /* Generate a variable permutation. */ | |
7279 | ||
7280 | static void | |
7281 | aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel) | |
7282 | { | |
7283 | enum machine_mode vmode = GET_MODE (target); | |
7284 | bool one_vector_p = rtx_equal_p (op0, op1); | |
7285 | ||
7286 | gcc_checking_assert (vmode == V8QImode || vmode == V16QImode); | |
7287 | gcc_checking_assert (GET_MODE (op0) == vmode); | |
7288 | gcc_checking_assert (GET_MODE (op1) == vmode); | |
7289 | gcc_checking_assert (GET_MODE (sel) == vmode); | |
7290 | gcc_checking_assert (TARGET_SIMD); | |
7291 | ||
7292 | if (one_vector_p) | |
7293 | { | |
7294 | if (vmode == V8QImode) | |
7295 | { | |
7296 | /* Expand the argument to a V16QI mode by duplicating it. */ | |
7297 | rtx pair = gen_reg_rtx (V16QImode); | |
7298 | emit_insn (gen_aarch64_combinev8qi (pair, op0, op0)); | |
7299 | emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel)); | |
7300 | } | |
7301 | else | |
7302 | { | |
7303 | emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel)); | |
7304 | } | |
7305 | } | |
7306 | else | |
7307 | { | |
7308 | rtx pair; | |
7309 | ||
7310 | if (vmode == V8QImode) | |
7311 | { | |
7312 | pair = gen_reg_rtx (V16QImode); | |
7313 | emit_insn (gen_aarch64_combinev8qi (pair, op0, op1)); | |
7314 | emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel)); | |
7315 | } | |
7316 | else | |
7317 | { | |
7318 | pair = gen_reg_rtx (OImode); | |
7319 | emit_insn (gen_aarch64_combinev16qi (pair, op0, op1)); | |
7320 | emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel)); | |
7321 | } | |
7322 | } | |
7323 | } | |
7324 | ||
7325 | void | |
7326 | aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) | |
7327 | { | |
7328 | enum machine_mode vmode = GET_MODE (target); | |
7329 | unsigned int i, nelt = GET_MODE_NUNITS (vmode); | |
7330 | bool one_vector_p = rtx_equal_p (op0, op1); | |
7331 | rtx rmask[MAX_VECT_LEN], mask; | |
7332 | ||
7333 | gcc_checking_assert (!BYTES_BIG_ENDIAN); | |
7334 | ||
7335 | /* The TBL instruction does not use a modulo index, so we must take care | |
7336 | of that ourselves. */ | |
7337 | mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1); | |
7338 | for (i = 0; i < nelt; ++i) | |
7339 | rmask[i] = mask; | |
7340 | mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask)); | |
7341 | sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN); | |
7342 | ||
7343 | aarch64_expand_vec_perm_1 (target, op0, op1, sel); | |
7344 | } | |
7345 | ||
cc4d934f JG |
7346 | /* Recognize patterns suitable for the TRN instructions. */ |
7347 | static bool | |
7348 | aarch64_evpc_trn (struct expand_vec_perm_d *d) | |
7349 | { | |
7350 | unsigned int i, odd, mask, nelt = d->nelt; | |
7351 | rtx out, in0, in1, x; | |
7352 | rtx (*gen) (rtx, rtx, rtx); | |
7353 | enum machine_mode vmode = d->vmode; | |
7354 | ||
7355 | if (GET_MODE_UNIT_SIZE (vmode) > 8) | |
7356 | return false; | |
7357 | ||
7358 | /* Note that these are little-endian tests. | |
7359 | We correct for big-endian later. */ | |
7360 | if (d->perm[0] == 0) | |
7361 | odd = 0; | |
7362 | else if (d->perm[0] == 1) | |
7363 | odd = 1; | |
7364 | else | |
7365 | return false; | |
7366 | mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1); | |
7367 | ||
7368 | for (i = 0; i < nelt; i += 2) | |
7369 | { | |
7370 | if (d->perm[i] != i + odd) | |
7371 | return false; | |
7372 | if (d->perm[i + 1] != ((i + nelt + odd) & mask)) | |
7373 | return false; | |
7374 | } | |
7375 | ||
7376 | /* Success! */ | |
7377 | if (d->testing_p) | |
7378 | return true; | |
7379 | ||
7380 | in0 = d->op0; | |
7381 | in1 = d->op1; | |
7382 | if (BYTES_BIG_ENDIAN) | |
7383 | { | |
7384 | x = in0, in0 = in1, in1 = x; | |
7385 | odd = !odd; | |
7386 | } | |
7387 | out = d->target; | |
7388 | ||
7389 | if (odd) | |
7390 | { | |
7391 | switch (vmode) | |
7392 | { | |
7393 | case V16QImode: gen = gen_aarch64_trn2v16qi; break; | |
7394 | case V8QImode: gen = gen_aarch64_trn2v8qi; break; | |
7395 | case V8HImode: gen = gen_aarch64_trn2v8hi; break; | |
7396 | case V4HImode: gen = gen_aarch64_trn2v4hi; break; | |
7397 | case V4SImode: gen = gen_aarch64_trn2v4si; break; | |
7398 | case V2SImode: gen = gen_aarch64_trn2v2si; break; | |
7399 | case V2DImode: gen = gen_aarch64_trn2v2di; break; | |
7400 | case V4SFmode: gen = gen_aarch64_trn2v4sf; break; | |
7401 | case V2SFmode: gen = gen_aarch64_trn2v2sf; break; | |
7402 | case V2DFmode: gen = gen_aarch64_trn2v2df; break; | |
7403 | default: | |
7404 | return false; | |
7405 | } | |
7406 | } | |
7407 | else | |
7408 | { | |
7409 | switch (vmode) | |
7410 | { | |
7411 | case V16QImode: gen = gen_aarch64_trn1v16qi; break; | |
7412 | case V8QImode: gen = gen_aarch64_trn1v8qi; break; | |
7413 | case V8HImode: gen = gen_aarch64_trn1v8hi; break; | |
7414 | case V4HImode: gen = gen_aarch64_trn1v4hi; break; | |
7415 | case V4SImode: gen = gen_aarch64_trn1v4si; break; | |
7416 | case V2SImode: gen = gen_aarch64_trn1v2si; break; | |
7417 | case V2DImode: gen = gen_aarch64_trn1v2di; break; | |
7418 | case V4SFmode: gen = gen_aarch64_trn1v4sf; break; | |
7419 | case V2SFmode: gen = gen_aarch64_trn1v2sf; break; | |
7420 | case V2DFmode: gen = gen_aarch64_trn1v2df; break; | |
7421 | default: | |
7422 | return false; | |
7423 | } | |
7424 | } | |
7425 | ||
7426 | emit_insn (gen (out, in0, in1)); | |
7427 | return true; | |
7428 | } | |
7429 | ||
7430 | /* Recognize patterns suitable for the UZP instructions. */ | |
7431 | static bool | |
7432 | aarch64_evpc_uzp (struct expand_vec_perm_d *d) | |
7433 | { | |
7434 | unsigned int i, odd, mask, nelt = d->nelt; | |
7435 | rtx out, in0, in1, x; | |
7436 | rtx (*gen) (rtx, rtx, rtx); | |
7437 | enum machine_mode vmode = d->vmode; | |
7438 | ||
7439 | if (GET_MODE_UNIT_SIZE (vmode) > 8) | |
7440 | return false; | |
7441 | ||
7442 | /* Note that these are little-endian tests. | |
7443 | We correct for big-endian later. */ | |
7444 | if (d->perm[0] == 0) | |
7445 | odd = 0; | |
7446 | else if (d->perm[0] == 1) | |
7447 | odd = 1; | |
7448 | else | |
7449 | return false; | |
7450 | mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1); | |
7451 | ||
7452 | for (i = 0; i < nelt; i++) | |
7453 | { | |
7454 | unsigned elt = (i * 2 + odd) & mask; | |
7455 | if (d->perm[i] != elt) | |
7456 | return false; | |
7457 | } | |
7458 | ||
7459 | /* Success! */ | |
7460 | if (d->testing_p) | |
7461 | return true; | |
7462 | ||
7463 | in0 = d->op0; | |
7464 | in1 = d->op1; | |
7465 | if (BYTES_BIG_ENDIAN) | |
7466 | { | |
7467 | x = in0, in0 = in1, in1 = x; | |
7468 | odd = !odd; | |
7469 | } | |
7470 | out = d->target; | |
7471 | ||
7472 | if (odd) | |
7473 | { | |
7474 | switch (vmode) | |
7475 | { | |
7476 | case V16QImode: gen = gen_aarch64_uzp2v16qi; break; | |
7477 | case V8QImode: gen = gen_aarch64_uzp2v8qi; break; | |
7478 | case V8HImode: gen = gen_aarch64_uzp2v8hi; break; | |
7479 | case V4HImode: gen = gen_aarch64_uzp2v4hi; break; | |
7480 | case V4SImode: gen = gen_aarch64_uzp2v4si; break; | |
7481 | case V2SImode: gen = gen_aarch64_uzp2v2si; break; | |
7482 | case V2DImode: gen = gen_aarch64_uzp2v2di; break; | |
7483 | case V4SFmode: gen = gen_aarch64_uzp2v4sf; break; | |
7484 | case V2SFmode: gen = gen_aarch64_uzp2v2sf; break; | |
7485 | case V2DFmode: gen = gen_aarch64_uzp2v2df; break; | |
7486 | default: | |
7487 | return false; | |
7488 | } | |
7489 | } | |
7490 | else | |
7491 | { | |
7492 | switch (vmode) | |
7493 | { | |
7494 | case V16QImode: gen = gen_aarch64_uzp1v16qi; break; | |
7495 | case V8QImode: gen = gen_aarch64_uzp1v8qi; break; | |
7496 | case V8HImode: gen = gen_aarch64_uzp1v8hi; break; | |
7497 | case V4HImode: gen = gen_aarch64_uzp1v4hi; break; | |
7498 | case V4SImode: gen = gen_aarch64_uzp1v4si; break; | |
7499 | case V2SImode: gen = gen_aarch64_uzp1v2si; break; | |
7500 | case V2DImode: gen = gen_aarch64_uzp1v2di; break; | |
7501 | case V4SFmode: gen = gen_aarch64_uzp1v4sf; break; | |
7502 | case V2SFmode: gen = gen_aarch64_uzp1v2sf; break; | |
7503 | case V2DFmode: gen = gen_aarch64_uzp1v2df; break; | |
7504 | default: | |
7505 | return false; | |
7506 | } | |
7507 | } | |
7508 | ||
7509 | emit_insn (gen (out, in0, in1)); | |
7510 | return true; | |
7511 | } | |
7512 | ||
7513 | /* Recognize patterns suitable for the ZIP instructions. */ | |
7514 | static bool | |
7515 | aarch64_evpc_zip (struct expand_vec_perm_d *d) | |
7516 | { | |
7517 | unsigned int i, high, mask, nelt = d->nelt; | |
7518 | rtx out, in0, in1, x; | |
7519 | rtx (*gen) (rtx, rtx, rtx); | |
7520 | enum machine_mode vmode = d->vmode; | |
7521 | ||
7522 | if (GET_MODE_UNIT_SIZE (vmode) > 8) | |
7523 | return false; | |
7524 | ||
7525 | /* Note that these are little-endian tests. | |
7526 | We correct for big-endian later. */ | |
7527 | high = nelt / 2; | |
7528 | if (d->perm[0] == high) | |
7529 | /* Do Nothing. */ | |
7530 | ; | |
7531 | else if (d->perm[0] == 0) | |
7532 | high = 0; | |
7533 | else | |
7534 | return false; | |
7535 | mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1); | |
7536 | ||
7537 | for (i = 0; i < nelt / 2; i++) | |
7538 | { | |
7539 | unsigned elt = (i + high) & mask; | |
7540 | if (d->perm[i * 2] != elt) | |
7541 | return false; | |
7542 | elt = (elt + nelt) & mask; | |
7543 | if (d->perm[i * 2 + 1] != elt) | |
7544 | return false; | |
7545 | } | |
7546 | ||
7547 | /* Success! */ | |
7548 | if (d->testing_p) | |
7549 | return true; | |
7550 | ||
7551 | in0 = d->op0; | |
7552 | in1 = d->op1; | |
7553 | if (BYTES_BIG_ENDIAN) | |
7554 | { | |
7555 | x = in0, in0 = in1, in1 = x; | |
7556 | high = !high; | |
7557 | } | |
7558 | out = d->target; | |
7559 | ||
7560 | if (high) | |
7561 | { | |
7562 | switch (vmode) | |
7563 | { | |
7564 | case V16QImode: gen = gen_aarch64_zip2v16qi; break; | |
7565 | case V8QImode: gen = gen_aarch64_zip2v8qi; break; | |
7566 | case V8HImode: gen = gen_aarch64_zip2v8hi; break; | |
7567 | case V4HImode: gen = gen_aarch64_zip2v4hi; break; | |
7568 | case V4SImode: gen = gen_aarch64_zip2v4si; break; | |
7569 | case V2SImode: gen = gen_aarch64_zip2v2si; break; | |
7570 | case V2DImode: gen = gen_aarch64_zip2v2di; break; | |
7571 | case V4SFmode: gen = gen_aarch64_zip2v4sf; break; | |
7572 | case V2SFmode: gen = gen_aarch64_zip2v2sf; break; | |
7573 | case V2DFmode: gen = gen_aarch64_zip2v2df; break; | |
7574 | default: | |
7575 | return false; | |
7576 | } | |
7577 | } | |
7578 | else | |
7579 | { | |
7580 | switch (vmode) | |
7581 | { | |
7582 | case V16QImode: gen = gen_aarch64_zip1v16qi; break; | |
7583 | case V8QImode: gen = gen_aarch64_zip1v8qi; break; | |
7584 | case V8HImode: gen = gen_aarch64_zip1v8hi; break; | |
7585 | case V4HImode: gen = gen_aarch64_zip1v4hi; break; | |
7586 | case V4SImode: gen = gen_aarch64_zip1v4si; break; | |
7587 | case V2SImode: gen = gen_aarch64_zip1v2si; break; | |
7588 | case V2DImode: gen = gen_aarch64_zip1v2di; break; | |
7589 | case V4SFmode: gen = gen_aarch64_zip1v4sf; break; | |
7590 | case V2SFmode: gen = gen_aarch64_zip1v2sf; break; | |
7591 | case V2DFmode: gen = gen_aarch64_zip1v2df; break; | |
7592 | default: | |
7593 | return false; | |
7594 | } | |
7595 | } | |
7596 | ||
7597 | emit_insn (gen (out, in0, in1)); | |
7598 | return true; | |
7599 | } | |
7600 | ||
88b08073 JG |
7601 | static bool |
7602 | aarch64_evpc_tbl (struct expand_vec_perm_d *d) | |
7603 | { | |
7604 | rtx rperm[MAX_VECT_LEN], sel; | |
7605 | enum machine_mode vmode = d->vmode; | |
7606 | unsigned int i, nelt = d->nelt; | |
7607 | ||
7608 | /* TODO: ARM's TBL indexing is little-endian. In order to handle GCC's | |
7609 | numbering of elements for big-endian, we must reverse the order. */ | |
7610 | if (BYTES_BIG_ENDIAN) | |
7611 | return false; | |
7612 | ||
7613 | if (d->testing_p) | |
7614 | return true; | |
7615 | ||
7616 | /* Generic code will try constant permutation twice. Once with the | |
7617 | original mode and again with the elements lowered to QImode. | |
7618 | So wait and don't do the selector expansion ourselves. */ | |
7619 | if (vmode != V8QImode && vmode != V16QImode) | |
7620 | return false; | |
7621 | ||
7622 | for (i = 0; i < nelt; ++i) | |
7623 | rperm[i] = GEN_INT (d->perm[i]); | |
7624 | sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm)); | |
7625 | sel = force_reg (vmode, sel); | |
7626 | ||
7627 | aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel); | |
7628 | return true; | |
7629 | } | |
7630 | ||
7631 | static bool | |
7632 | aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) | |
7633 | { | |
7634 | /* The pattern matching functions above are written to look for a small | |
7635 | number to begin the sequence (0, 1, N/2). If we begin with an index | |
7636 | from the second operand, we can swap the operands. */ | |
7637 | if (d->perm[0] >= d->nelt) | |
7638 | { | |
7639 | unsigned i, nelt = d->nelt; | |
7640 | rtx x; | |
7641 | ||
7642 | for (i = 0; i < nelt; ++i) | |
7643 | d->perm[i] = (d->perm[i] + nelt) & (2 * nelt - 1); | |
7644 | ||
7645 | x = d->op0; | |
7646 | d->op0 = d->op1; | |
7647 | d->op1 = x; | |
7648 | } | |
7649 | ||
7650 | if (TARGET_SIMD) | |
cc4d934f JG |
7651 | { |
7652 | if (aarch64_evpc_zip (d)) | |
7653 | return true; | |
7654 | else if (aarch64_evpc_uzp (d)) | |
7655 | return true; | |
7656 | else if (aarch64_evpc_trn (d)) | |
7657 | return true; | |
7658 | return aarch64_evpc_tbl (d); | |
7659 | } | |
88b08073 JG |
7660 | return false; |
7661 | } | |
7662 | ||
7663 | /* Expand a vec_perm_const pattern. */ | |
7664 | ||
7665 | bool | |
7666 | aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel) | |
7667 | { | |
7668 | struct expand_vec_perm_d d; | |
7669 | int i, nelt, which; | |
7670 | ||
7671 | d.target = target; | |
7672 | d.op0 = op0; | |
7673 | d.op1 = op1; | |
7674 | ||
7675 | d.vmode = GET_MODE (target); | |
7676 | gcc_assert (VECTOR_MODE_P (d.vmode)); | |
7677 | d.nelt = nelt = GET_MODE_NUNITS (d.vmode); | |
7678 | d.testing_p = false; | |
7679 | ||
7680 | for (i = which = 0; i < nelt; ++i) | |
7681 | { | |
7682 | rtx e = XVECEXP (sel, 0, i); | |
7683 | int ei = INTVAL (e) & (2 * nelt - 1); | |
7684 | which |= (ei < nelt ? 1 : 2); | |
7685 | d.perm[i] = ei; | |
7686 | } | |
7687 | ||
7688 | switch (which) | |
7689 | { | |
7690 | default: | |
7691 | gcc_unreachable (); | |
7692 | ||
7693 | case 3: | |
7694 | d.one_vector_p = false; | |
7695 | if (!rtx_equal_p (op0, op1)) | |
7696 | break; | |
7697 | ||
7698 | /* The elements of PERM do not suggest that only the first operand | |
7699 | is used, but both operands are identical. Allow easier matching | |
7700 | of the permutation by folding the permutation into the single | |
7701 | input vector. */ | |
7702 | /* Fall Through. */ | |
7703 | case 2: | |
7704 | for (i = 0; i < nelt; ++i) | |
7705 | d.perm[i] &= nelt - 1; | |
7706 | d.op0 = op1; | |
7707 | d.one_vector_p = true; | |
7708 | break; | |
7709 | ||
7710 | case 1: | |
7711 | d.op1 = op0; | |
7712 | d.one_vector_p = true; | |
7713 | break; | |
7714 | } | |
7715 | ||
7716 | return aarch64_expand_vec_perm_const_1 (&d); | |
7717 | } | |
7718 | ||
7719 | static bool | |
7720 | aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode, | |
7721 | const unsigned char *sel) | |
7722 | { | |
7723 | struct expand_vec_perm_d d; | |
7724 | unsigned int i, nelt, which; | |
7725 | bool ret; | |
7726 | ||
7727 | d.vmode = vmode; | |
7728 | d.nelt = nelt = GET_MODE_NUNITS (d.vmode); | |
7729 | d.testing_p = true; | |
7730 | memcpy (d.perm, sel, nelt); | |
7731 | ||
7732 | /* Calculate whether all elements are in one vector. */ | |
7733 | for (i = which = 0; i < nelt; ++i) | |
7734 | { | |
7735 | unsigned char e = d.perm[i]; | |
7736 | gcc_assert (e < 2 * nelt); | |
7737 | which |= (e < nelt ? 1 : 2); | |
7738 | } | |
7739 | ||
7740 | /* If all elements are from the second vector, reindex as if from the | |
7741 | first vector. */ | |
7742 | if (which == 2) | |
7743 | for (i = 0; i < nelt; ++i) | |
7744 | d.perm[i] -= nelt; | |
7745 | ||
7746 | /* Check whether the mask can be applied to a single vector. */ | |
7747 | d.one_vector_p = (which != 3); | |
7748 | ||
7749 | d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); | |
7750 | d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2); | |
7751 | if (!d.one_vector_p) | |
7752 | d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); | |
7753 | ||
7754 | start_sequence (); | |
7755 | ret = aarch64_expand_vec_perm_const_1 (&d); | |
7756 | end_sequence (); | |
7757 | ||
7758 | return ret; | |
7759 | } | |
7760 | ||
43e9d192 IB |
7761 | #undef TARGET_ADDRESS_COST |
7762 | #define TARGET_ADDRESS_COST aarch64_address_cost | |
7763 | ||
7764 | /* This hook will determines whether unnamed bitfields affect the alignment | |
7765 | of the containing structure. The hook returns true if the structure | |
7766 | should inherit the alignment requirements of an unnamed bitfield's | |
7767 | type. */ | |
7768 | #undef TARGET_ALIGN_ANON_BITFIELD | |
7769 | #define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true | |
7770 | ||
7771 | #undef TARGET_ASM_ALIGNED_DI_OP | |
7772 | #define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t" | |
7773 | ||
7774 | #undef TARGET_ASM_ALIGNED_HI_OP | |
7775 | #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t" | |
7776 | ||
7777 | #undef TARGET_ASM_ALIGNED_SI_OP | |
7778 | #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" | |
7779 | ||
7780 | #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK | |
7781 | #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ | |
7782 | hook_bool_const_tree_hwi_hwi_const_tree_true | |
7783 | ||
7784 | #undef TARGET_ASM_FILE_START | |
7785 | #define TARGET_ASM_FILE_START aarch64_start_file | |
7786 | ||
7787 | #undef TARGET_ASM_OUTPUT_MI_THUNK | |
7788 | #define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk | |
7789 | ||
7790 | #undef TARGET_ASM_SELECT_RTX_SECTION | |
7791 | #define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section | |
7792 | ||
7793 | #undef TARGET_ASM_TRAMPOLINE_TEMPLATE | |
7794 | #define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template | |
7795 | ||
7796 | #undef TARGET_BUILD_BUILTIN_VA_LIST | |
7797 | #define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list | |
7798 | ||
7799 | #undef TARGET_CALLEE_COPIES | |
7800 | #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false | |
7801 | ||
7802 | #undef TARGET_CAN_ELIMINATE | |
7803 | #define TARGET_CAN_ELIMINATE aarch64_can_eliminate | |
7804 | ||
7805 | #undef TARGET_CANNOT_FORCE_CONST_MEM | |
7806 | #define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem | |
7807 | ||
7808 | #undef TARGET_CONDITIONAL_REGISTER_USAGE | |
7809 | #define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage | |
7810 | ||
7811 | /* Only the least significant bit is used for initialization guard | |
7812 | variables. */ | |
7813 | #undef TARGET_CXX_GUARD_MASK_BIT | |
7814 | #define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true | |
7815 | ||
7816 | #undef TARGET_C_MODE_FOR_SUFFIX | |
7817 | #define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix | |
7818 | ||
7819 | #ifdef TARGET_BIG_ENDIAN_DEFAULT | |
7820 | #undef TARGET_DEFAULT_TARGET_FLAGS | |
7821 | #define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END) | |
7822 | #endif | |
7823 | ||
7824 | #undef TARGET_CLASS_MAX_NREGS | |
7825 | #define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs | |
7826 | ||
119103ca JG |
7827 | #undef TARGET_BUILTIN_DECL |
7828 | #define TARGET_BUILTIN_DECL aarch64_builtin_decl | |
7829 | ||
43e9d192 IB |
7830 | #undef TARGET_EXPAND_BUILTIN |
7831 | #define TARGET_EXPAND_BUILTIN aarch64_expand_builtin | |
7832 | ||
7833 | #undef TARGET_EXPAND_BUILTIN_VA_START | |
7834 | #define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start | |
7835 | ||
7836 | #undef TARGET_FUNCTION_ARG | |
7837 | #define TARGET_FUNCTION_ARG aarch64_function_arg | |
7838 | ||
7839 | #undef TARGET_FUNCTION_ARG_ADVANCE | |
7840 | #define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance | |
7841 | ||
7842 | #undef TARGET_FUNCTION_ARG_BOUNDARY | |
7843 | #define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary | |
7844 | ||
7845 | #undef TARGET_FUNCTION_OK_FOR_SIBCALL | |
7846 | #define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall | |
7847 | ||
7848 | #undef TARGET_FUNCTION_VALUE | |
7849 | #define TARGET_FUNCTION_VALUE aarch64_function_value | |
7850 | ||
7851 | #undef TARGET_FUNCTION_VALUE_REGNO_P | |
7852 | #define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p | |
7853 | ||
7854 | #undef TARGET_FRAME_POINTER_REQUIRED | |
7855 | #define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required | |
7856 | ||
7857 | #undef TARGET_GIMPLIFY_VA_ARG_EXPR | |
7858 | #define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr | |
7859 | ||
7860 | #undef TARGET_INIT_BUILTINS | |
7861 | #define TARGET_INIT_BUILTINS aarch64_init_builtins | |
7862 | ||
7863 | #undef TARGET_LEGITIMATE_ADDRESS_P | |
7864 | #define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p | |
7865 | ||
7866 | #undef TARGET_LEGITIMATE_CONSTANT_P | |
7867 | #define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p | |
7868 | ||
7869 | #undef TARGET_LIBGCC_CMP_RETURN_MODE | |
7870 | #define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode | |
7871 | ||
ac2b960f YZ |
7872 | #undef TARGET_MANGLE_TYPE |
7873 | #define TARGET_MANGLE_TYPE aarch64_mangle_type | |
7874 | ||
43e9d192 IB |
7875 | #undef TARGET_MEMORY_MOVE_COST |
7876 | #define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost | |
7877 | ||
7878 | #undef TARGET_MUST_PASS_IN_STACK | |
7879 | #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size | |
7880 | ||
7881 | /* This target hook should return true if accesses to volatile bitfields | |
7882 | should use the narrowest mode possible. It should return false if these | |
7883 | accesses should use the bitfield container type. */ | |
7884 | #undef TARGET_NARROW_VOLATILE_BITFIELD | |
7885 | #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false | |
7886 | ||
7887 | #undef TARGET_OPTION_OVERRIDE | |
7888 | #define TARGET_OPTION_OVERRIDE aarch64_override_options | |
7889 | ||
7890 | #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE | |
7891 | #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \ | |
7892 | aarch64_override_options_after_change | |
7893 | ||
7894 | #undef TARGET_PASS_BY_REFERENCE | |
7895 | #define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference | |
7896 | ||
7897 | #undef TARGET_PREFERRED_RELOAD_CLASS | |
7898 | #define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class | |
7899 | ||
7900 | #undef TARGET_SECONDARY_RELOAD | |
7901 | #define TARGET_SECONDARY_RELOAD aarch64_secondary_reload | |
7902 | ||
7903 | #undef TARGET_SHIFT_TRUNCATION_MASK | |
7904 | #define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask | |
7905 | ||
7906 | #undef TARGET_SETUP_INCOMING_VARARGS | |
7907 | #define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs | |
7908 | ||
7909 | #undef TARGET_STRUCT_VALUE_RTX | |
7910 | #define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx | |
7911 | ||
7912 | #undef TARGET_REGISTER_MOVE_COST | |
7913 | #define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost | |
7914 | ||
7915 | #undef TARGET_RETURN_IN_MEMORY | |
7916 | #define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory | |
7917 | ||
7918 | #undef TARGET_RETURN_IN_MSB | |
7919 | #define TARGET_RETURN_IN_MSB aarch64_return_in_msb | |
7920 | ||
7921 | #undef TARGET_RTX_COSTS | |
7922 | #define TARGET_RTX_COSTS aarch64_rtx_costs | |
7923 | ||
7924 | #undef TARGET_TRAMPOLINE_INIT | |
7925 | #define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init | |
7926 | ||
7927 | #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P | |
7928 | #define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p | |
7929 | ||
7930 | #undef TARGET_VECTOR_MODE_SUPPORTED_P | |
7931 | #define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p | |
7932 | ||
7933 | #undef TARGET_ARRAY_MODE_SUPPORTED_P | |
7934 | #define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p | |
7935 | ||
7936 | #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE | |
7937 | #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode | |
7938 | ||
42fc9a7f JG |
7939 | #undef TARGET_VECTORIZE_BUILTINS |
7940 | #define TARGET_VECTORIZE_BUILTINS | |
7941 | ||
7942 | #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION | |
7943 | #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \ | |
7944 | aarch64_builtin_vectorized_function | |
7945 | ||
3b357264 JG |
7946 | #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES |
7947 | #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ | |
7948 | aarch64_autovectorize_vector_sizes | |
7949 | ||
43e9d192 IB |
7950 | /* Section anchor support. */ |
7951 | ||
7952 | #undef TARGET_MIN_ANCHOR_OFFSET | |
7953 | #define TARGET_MIN_ANCHOR_OFFSET -256 | |
7954 | ||
7955 | /* Limit the maximum anchor offset to 4k-1, since that's the limit for a | |
7956 | byte offset; we can do much more for larger data types, but have no way | |
7957 | to determine the size of the access. We assume accesses are aligned. */ | |
7958 | #undef TARGET_MAX_ANCHOR_OFFSET | |
7959 | #define TARGET_MAX_ANCHOR_OFFSET 4095 | |
7960 | ||
db0253a4 TB |
7961 | #undef TARGET_VECTOR_ALIGNMENT |
7962 | #define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment | |
7963 | ||
7964 | #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE | |
7965 | #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \ | |
7966 | aarch64_simd_vector_alignment_reachable | |
7967 | ||
88b08073 JG |
7968 | /* vec_perm support. */ |
7969 | ||
7970 | #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK | |
7971 | #define TARGET_VECTORIZE_VEC_PERM_CONST_OK \ | |
7972 | aarch64_vectorize_vec_perm_const_ok | |
7973 | ||
70f09188 | 7974 | |
706b2314 | 7975 | #undef TARGET_FIXED_CONDITION_CODE_REGS |
70f09188 AP |
7976 | #define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs |
7977 | ||
43e9d192 IB |
7978 | struct gcc_target targetm = TARGET_INITIALIZER; |
7979 | ||
7980 | #include "gt-aarch64.h" |