/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#define INCLUDE_STRING
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "output.h"
#include "flags.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "langhooks.h"
#include "opts.h"
#include "params.h"
#include "gimplify.h"
#include "dwarf2.h"
#include "gimple-iterator.h"
#include "tree-vectorizer.h"
#include "aarch64-cost-tables.h"
#include "dumpfile.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "tm-constrs.h"
#include "sched-int.h"
#include "target-globals.h"
#include "common/common-target.h"

/* This file should be included last.  */
#include "target-def.h"

/* Defined for convenience.  */
#define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)

/* Classifies an address.

   ADDRESS_REG_IMM
       A simple base register plus immediate offset.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UXTW
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_SXTW
       A base register indexed by (optionally scaled) sign-extended register.

   ADDRESS_LO_SUM
       A LO_SUM rtx with a base register and "LO12" symbol relocation.

   ADDRESS_SYMBOLIC:
       A constant symbolic address, in pc-relative literal pool.  */

enum aarch64_address_type {
  ADDRESS_REG_IMM,
  ADDRESS_REG_WB,
  ADDRESS_REG_REG,
  ADDRESS_REG_UXTW,
  ADDRESS_REG_SXTW,
  ADDRESS_LO_SUM,
  ADDRESS_SYMBOLIC
};

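/* Editorial note, illustrative examples only (not from the original
   sources): typical assembly forms that fall into each class would be
     ADDRESS_REG_IMM    [x0, #16]
     ADDRESS_REG_WB     [x0, #16]!  or  [x0], #16
     ADDRESS_REG_REG    [x0, x1, lsl #3]
     ADDRESS_REG_UXTW   [x0, w1, uxtw #2]
     ADDRESS_REG_SXTW   [x0, w1, sxtw #2]
     ADDRESS_LO_SUM     ldr x1, [x0, #:lo12:sym]
     ADDRESS_SYMBOLIC   a pc-relative load from a literal pool entry.  */
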
struct aarch64_address_info {
  enum aarch64_address_type type;
  rtx base;
  rtx offset;
  int shift;
  enum aarch64_symbol_type symbol_type;
};

struct simd_immediate_info
{
  rtx value;
  int shift;
  int element_width;
  bool mvn;
  bool msl;
};

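/* Editorial note (not from the original sources): MVN selects the inverted
   immediate forms (MVNI/BIC) and MSL selects the "shift ones" modifier.
   For example, assuming the standard AArch64 encodings, "movi v0.4s, #0xab,
   msl #8" writes 0x0000abff to each 32-bit element, while "mvni v0.4s, #0xab"
   writes 0xffffff54.  */
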
/* The current code model.  */
enum aarch64_code_model aarch64_cmodel;

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS 1
#endif

static bool aarch64_composite_type_p (const_tree, machine_mode);
static bool aarch64_vfp_is_call_or_return_candidate (machine_mode,
                                                     const_tree,
                                                     machine_mode *, int *,
                                                     bool *);
static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void aarch64_override_options_after_change (void);
static bool aarch64_vector_mode_supported_p (machine_mode);
static bool aarch64_vectorize_vec_perm_const_ok (machine_mode vmode,
                                                 const unsigned char *sel);
static int aarch64_address_cost (rtx, machine_mode, addr_space_t, bool);

/* Major revision number of the ARM Architecture implemented by the target.  */
unsigned aarch64_architecture_version;

/* The processor for which instructions should be scheduled.  */
enum aarch64_processor aarch64_tune = cortexa53;

/* Mask to specify which instruction scheduling options should be used.  */
unsigned long aarch64_tune_flags = 0;

/* Global flag for PC relative loads.  */
bool aarch64_pcrelative_literal_loads;

/* Support for command line parsing of boolean flags in the tuning
   structures.  */
struct aarch64_flag_desc
{
  const char* name;
  unsigned int flag;
};

#define AARCH64_FUSION_PAIR(name, internal_name) \
  { name, AARCH64_FUSE_##internal_name },
static const struct aarch64_flag_desc aarch64_fusible_pairs[] =
{
  { "none", AARCH64_FUSE_NOTHING },
#include "aarch64-fusion-pairs.def"
  { "all", AARCH64_FUSE_ALL },
  { NULL, AARCH64_FUSE_NOTHING }
};

#define AARCH64_EXTRA_TUNING_OPTION(name, internal_name) \
  { name, AARCH64_EXTRA_TUNE_##internal_name },
static const struct aarch64_flag_desc aarch64_tuning_flags[] =
{
  { "none", AARCH64_EXTRA_TUNE_NONE },
#include "aarch64-tuning-flags.def"
  { "all", AARCH64_EXTRA_TUNE_ALL },
  { NULL, AARCH64_EXTRA_TUNE_NONE }
};

/* Tuning parameters.  */

static const struct cpu_addrcost_table generic_addrcost_table =
{
    {
      0, /* hi  */
      0, /* si  */
      0, /* di  */
      0, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0 /* imm_offset  */
};

static const struct cpu_addrcost_table cortexa57_addrcost_table =
{
    {
      1, /* hi  */
      0, /* si  */
      0, /* di  */
      1, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0, /* imm_offset  */
};

static const struct cpu_addrcost_table exynosm1_addrcost_table =
{
    {
      0, /* hi  */
      0, /* si  */
      0, /* di  */
      2, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  1, /* register_offset  */
  1, /* register_sextend  */
  2, /* register_zextend  */
  0, /* imm_offset  */
};

static const struct cpu_addrcost_table xgene1_addrcost_table =
{
    {
      1, /* hi  */
      0, /* si  */
      0, /* di  */
      1, /* ti  */
    },
  1, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  1, /* register_sextend  */
  1, /* register_zextend  */
  0, /* imm_offset  */
};

static const struct cpu_addrcost_table qdf24xx_addrcost_table =
{
    {
      1, /* hi  */
      0, /* si  */
      0, /* di  */
      1, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0 /* imm_offset  */
};

static const struct cpu_addrcost_table vulcan_addrcost_table =
{
    {
      0, /* hi  */
      0, /* si  */
      0, /* di  */
      2, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  2, /* register_offset  */
  3, /* register_sextend  */
  3, /* register_zextend  */
  0, /* imm_offset  */
};

static const struct cpu_regmove_cost generic_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  5, /* GP2FP  */
  5, /* FP2GP  */
  2 /* FP2FP  */
};

static const struct cpu_regmove_cost cortexa57_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  5, /* GP2FP  */
  5, /* FP2GP  */
  2 /* FP2FP  */
};

static const struct cpu_regmove_cost cortexa53_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  5, /* GP2FP  */
  5, /* FP2GP  */
  2 /* FP2FP  */
};

static const struct cpu_regmove_cost exynosm1_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost (actual, 4 and 9).  */
  9, /* GP2FP  */
  9, /* FP2GP  */
  1 /* FP2FP  */
};

static const struct cpu_regmove_cost thunderx_regmove_cost =
{
  2, /* GP2GP  */
  2, /* GP2FP  */
  6, /* FP2GP  */
  4 /* FP2FP  */
};

static const struct cpu_regmove_cost xgene1_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  8, /* GP2FP  */
  8, /* FP2GP  */
  2 /* FP2FP  */
};

static const struct cpu_regmove_cost qdf24xx_regmove_cost =
{
  2, /* GP2GP  */
  /* Avoid the use of int<->fp moves for spilling.  */
  6, /* GP2FP  */
  6, /* FP2GP  */
  4 /* FP2FP  */
};

static const struct cpu_regmove_cost vulcan_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of int<->fp moves for spilling.  */
  8, /* GP2FP  */
  8, /* FP2GP  */
  4 /* FP2FP  */
};

/* Generic costs for vector insn classes.  */
static const struct cpu_vector_cost generic_vector_cost =
{
  1, /* scalar_stmt_cost  */
  1, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  1, /* vec_stmt_cost  */
  2, /* vec_permute_cost  */
  1, /* vec_to_scalar_cost  */
  1, /* scalar_to_vec_cost  */
  1, /* vec_align_load_cost  */
  1, /* vec_unalign_load_cost  */
  1, /* vec_unalign_store_cost  */
  1, /* vec_store_cost  */
  3, /* cond_taken_branch_cost  */
  1 /* cond_not_taken_branch_cost  */
};

/* ThunderX costs for vector insn classes.  */
static const struct cpu_vector_cost thunderx_vector_cost =
{
  1, /* scalar_stmt_cost  */
  3, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  4, /* vec_stmt_cost  */
  4, /* vec_permute_cost  */
  2, /* vec_to_scalar_cost  */
  2, /* scalar_to_vec_cost  */
  3, /* vec_align_load_cost  */
  10, /* vec_unalign_load_cost  */
  10, /* vec_unalign_store_cost  */
  1, /* vec_store_cost  */
  3, /* cond_taken_branch_cost  */
  3 /* cond_not_taken_branch_cost  */
};

/* Cortex-A57 costs for vector insn classes.  */
static const struct cpu_vector_cost cortexa57_vector_cost =
{
  1, /* scalar_stmt_cost  */
  4, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  3, /* vec_stmt_cost  */
  3, /* vec_permute_cost  */
  8, /* vec_to_scalar_cost  */
  8, /* scalar_to_vec_cost  */
  5, /* vec_align_load_cost  */
  5, /* vec_unalign_load_cost  */
  1, /* vec_unalign_store_cost  */
  1, /* vec_store_cost  */
  1, /* cond_taken_branch_cost  */
  1 /* cond_not_taken_branch_cost  */
};

static const struct cpu_vector_cost exynosm1_vector_cost =
{
  1, /* scalar_stmt_cost  */
  5, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  3, /* vec_stmt_cost  */
  3, /* vec_permute_cost  */
  3, /* vec_to_scalar_cost  */
  3, /* scalar_to_vec_cost  */
  5, /* vec_align_load_cost  */
  5, /* vec_unalign_load_cost  */
  1, /* vec_unalign_store_cost  */
  1, /* vec_store_cost  */
  1, /* cond_taken_branch_cost  */
  1 /* cond_not_taken_branch_cost  */
};

/* X-Gene 1 costs for vector insn classes.  */
static const struct cpu_vector_cost xgene1_vector_cost =
{
  1, /* scalar_stmt_cost  */
  5, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  2, /* vec_stmt_cost  */
  2, /* vec_permute_cost  */
  4, /* vec_to_scalar_cost  */
  4, /* scalar_to_vec_cost  */
  10, /* vec_align_load_cost  */
  10, /* vec_unalign_load_cost  */
  2, /* vec_unalign_store_cost  */
  2, /* vec_store_cost  */
  2, /* cond_taken_branch_cost  */
  1 /* cond_not_taken_branch_cost  */
};

/* Costs for vector insn classes for Vulcan.  */
static const struct cpu_vector_cost vulcan_vector_cost =
{
  6, /* scalar_stmt_cost  */
  4, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  6, /* vec_stmt_cost  */
  3, /* vec_permute_cost  */
  6, /* vec_to_scalar_cost  */
  5, /* scalar_to_vec_cost  */
  8, /* vec_align_load_cost  */
  8, /* vec_unalign_load_cost  */
  4, /* vec_unalign_store_cost  */
  4, /* vec_store_cost  */
  2, /* cond_taken_branch_cost  */
  1 /* cond_not_taken_branch_cost  */
};

/* Generic costs for branch instructions.  */
static const struct cpu_branch_cost generic_branch_cost =
{
  2, /* Predictable.  */
  2 /* Unpredictable.  */
};

/* Branch costs for Cortex-A57.  */
static const struct cpu_branch_cost cortexa57_branch_cost =
{
  1, /* Predictable.  */
  3 /* Unpredictable.  */
};

/* Branch costs for Vulcan.  */
static const struct cpu_branch_cost vulcan_branch_cost =
{
  1, /* Predictable.  */
  3 /* Unpredictable.  */
};

/* Generic approximation modes.  */
static const cpu_approx_modes generic_approx_modes =
{
  AARCH64_APPROX_NONE, /* division  */
  AARCH64_APPROX_NONE, /* sqrt  */
  AARCH64_APPROX_NONE /* recip_sqrt  */
};

/* Approximation modes for Exynos M1.  */
static const cpu_approx_modes exynosm1_approx_modes =
{
  AARCH64_APPROX_NONE, /* division  */
  AARCH64_APPROX_ALL, /* sqrt  */
  AARCH64_APPROX_ALL /* recip_sqrt  */
};

/* Approximation modes for X-Gene 1.  */
static const cpu_approx_modes xgene1_approx_modes =
{
  AARCH64_APPROX_NONE, /* division  */
  AARCH64_APPROX_NONE, /* sqrt  */
  AARCH64_APPROX_ALL /* recip_sqrt  */
};

static const struct tune_params generic_tunings =
{
  &cortexa57_extra_costs,
  &generic_addrcost_table,
  &generic_regmove_cost,
  &generic_vector_cost,
  &generic_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  2, /* issue_rate  */
  AARCH64_FUSE_NOTHING, /* fusible_ops  */
  8,  /* function_align.  */
  8,  /* jump_align.  */
  4,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_OFF,  /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params cortexa35_tunings =
{
  &cortexa53_extra_costs,
  &generic_addrcost_table,
  &cortexa53_regmove_cost,
  &generic_vector_cost,
  &cortexa57_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  1, /* issue_rate  */
  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK | AARCH64_FUSE_ADRP_LDR), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params cortexa53_tunings =
{
  &cortexa53_extra_costs,
  &generic_addrcost_table,
  &cortexa53_regmove_cost,
  &generic_vector_cost,
  &cortexa57_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  2, /* issue_rate  */
  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK | AARCH64_FUSE_ADRP_LDR), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params cortexa57_tunings =
{
  &cortexa57_extra_costs,
  &cortexa57_addrcost_table,
  &cortexa57_regmove_cost,
  &cortexa57_vector_cost,
  &cortexa57_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  3, /* issue_rate  */
  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_RENAME_FMA_REGS) /* tune_flags.  */
};

static const struct tune_params cortexa72_tunings =
{
  &cortexa57_extra_costs,
  &cortexa57_addrcost_table,
  &cortexa57_regmove_cost,
  &cortexa57_vector_cost,
  &cortexa57_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  3, /* issue_rate  */
  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params cortexa73_tunings =
{
  &cortexa57_extra_costs,
  &cortexa57_addrcost_table,
  &cortexa57_regmove_cost,
  &cortexa57_vector_cost,
  &cortexa57_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost.  */
  2, /* issue_rate.  */
  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK | AARCH64_FUSE_ADRP_LDR), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params exynosm1_tunings =
{
  &exynosm1_extra_costs,
  &exynosm1_addrcost_table,
  &exynosm1_regmove_cost,
  &exynosm1_vector_cost,
  &generic_branch_cost,
  &exynosm1_approx_modes,
  4,  /* memmov_cost  */
  3,  /* issue_rate  */
  (AARCH64_FUSE_AES_AESMC), /* fusible_ops  */
  4,  /* function_align.  */
  4,  /* jump_align.  */
  4,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  48, /* max_case_values.  */
  64, /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params thunderx_tunings =
{
  &thunderx_extra_costs,
  &generic_addrcost_table,
  &thunderx_regmove_cost,
  &thunderx_vector_cost,
  &generic_branch_cost,
  &generic_approx_modes,
  6, /* memmov_cost  */
  2, /* issue_rate  */
  AARCH64_FUSE_CMP_BRANCH, /* fusible_ops  */
  8,  /* function_align.  */
  8,  /* jump_align.  */
  8,  /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_OFF,  /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_SLOW_UNALIGNED_LDPW)  /* tune_flags.  */
};

static const struct tune_params xgene1_tunings =
{
  &xgene1_extra_costs,
  &xgene1_addrcost_table,
  &xgene1_regmove_cost,
  &xgene1_vector_cost,
  &generic_branch_cost,
  &xgene1_approx_modes,
  6, /* memmov_cost  */
  4, /* issue_rate  */
  AARCH64_FUSE_NOTHING, /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  16, /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  0,  /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_OFF,  /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params qdf24xx_tunings =
{
  &qdf24xx_extra_costs,
  &qdf24xx_addrcost_table,
  &qdf24xx_regmove_cost,
  &generic_vector_cost,
  &generic_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost  */
  4, /* issue_rate  */
  (AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
   | AARCH64_FUSE_MOVK_MOVK), /* fusible_ops  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  16, /* loop_align.  */
  2,  /* int_reassoc_width.  */
  4,  /* fp_reassoc_width.  */
  1,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  64, /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_STRONG, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

static const struct tune_params vulcan_tunings =
{
  &vulcan_extra_costs,
  &vulcan_addrcost_table,
  &vulcan_regmove_cost,
  &vulcan_vector_cost,
  &vulcan_branch_cost,
  &generic_approx_modes,
  4, /* memmov_cost.  */
  4, /* issue_rate.  */
  AARCH64_FUSE_NOTHING, /* fusible_ops.  */
  16, /* function_align.  */
  8,  /* jump_align.  */
  16, /* loop_align.  */
  3,  /* int_reassoc_width.  */
  2,  /* fp_reassoc_width.  */
  2,  /* vec_reassoc_width.  */
  2,  /* min_div_recip_mul_sf.  */
  2,  /* min_div_recip_mul_df.  */
  0,  /* max_case_values.  */
  64, /* cache_line_size.  */
  tune_params::AUTOPREFETCHER_OFF,  /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_NONE) /* tune_flags.  */
};

/* Support for fine-grained override of the tuning structures.  */
struct aarch64_tuning_override_function
{
  const char* name;
  void (*parse_override)(const char*, struct tune_params*);
};

static void aarch64_parse_fuse_string (const char*, struct tune_params*);
static void aarch64_parse_tune_string (const char*, struct tune_params*);

static const struct aarch64_tuning_override_function
aarch64_tuning_override_functions[] =
{
  { "fuse", aarch64_parse_fuse_string },
  { "tune", aarch64_parse_tune_string },
  { NULL, NULL }
};

/* A processor implementing AArch64.  */
struct processor
{
  const char *const name;
  enum aarch64_processor ident;
  enum aarch64_processor sched_core;
  enum aarch64_arch arch;
  unsigned architecture_version;
  const unsigned long flags;
  const struct tune_params *const tune;
};

/* Architectures implementing AArch64.  */
static const struct processor all_architectures[] =
{
#define AARCH64_ARCH(NAME, CORE, ARCH_IDENT, ARCH_REV, FLAGS) \
  {NAME, CORE, CORE, AARCH64_ARCH_##ARCH_IDENT, ARCH_REV, FLAGS, NULL},
#include "aarch64-arches.def"
  {NULL, aarch64_none, aarch64_none, aarch64_no_arch, 0, 0, NULL}
};

/* Processor cores implementing AArch64.  */
static const struct processor all_cores[] =
{
#define AARCH64_CORE(NAME, IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART) \
  {NAME, IDENT, SCHED, AARCH64_ARCH_##ARCH, \
   all_architectures[AARCH64_ARCH_##ARCH].architecture_version, \
   FLAGS, &COSTS##_tunings},
#include "aarch64-cores.def"
  {"generic", generic, cortexa53, AARCH64_ARCH_8A, 8,
   AARCH64_FL_FOR_ARCH8, &generic_tunings},
  {NULL, aarch64_none, aarch64_none, aarch64_no_arch, 0, 0, NULL}
};


/* Target specification.  These are populated by the -march, -mtune, -mcpu
   handling code or by target attributes.  */
static const struct processor *selected_arch;
static const struct processor *selected_cpu;
static const struct processor *selected_tune;

/* The current tuning set.  */
struct tune_params aarch64_tune_params = generic_tunings;

#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)

/* An ISA extension in the co-processor and main instruction set space.  */
struct aarch64_option_extension
{
  const char *const name;
  const unsigned long flags_on;
  const unsigned long flags_off;
};

typedef enum aarch64_cond_code
{
  AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
  AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
  AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
}
aarch64_cc;

#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))

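/* Editorial note, not from the original sources: the condition codes are laid
   out so that each even/odd pair holds a condition and its inverse (EQ/NE,
   CS/CC, MI/PL, VS/VC, HI/LS, GE/LT, GT/LE), which is why
   AARCH64_INVERSE_CONDITION_CODE only has to flip the low bit.  */
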
874/* The condition codes of the processor, and the inverse function. */
875static const char * const aarch64_condition_codes[] =
876{
877 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
878 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
879};
880
973d2e01
TP
881/* Generate code to enable conditional branches in functions over 1 MiB. */
882const char *
883aarch64_gen_far_branch (rtx * operands, int pos_label, const char * dest,
884 const char * branch_format)
885{
886 rtx_code_label * tmp_label = gen_label_rtx ();
887 char label_buf[256];
888 char buffer[128];
889 ASM_GENERATE_INTERNAL_LABEL (label_buf, dest,
890 CODE_LABEL_NUMBER (tmp_label));
891 const char *label_ptr = targetm.strip_name_encoding (label_buf);
892 rtx dest_label = operands[pos_label];
893 operands[pos_label] = tmp_label;
894
895 snprintf (buffer, sizeof (buffer), "%s%s", branch_format, label_ptr);
896 output_asm_insn (buffer, operands);
897
898 snprintf (buffer, sizeof (buffer), "b\t%%l%d\n%s:", pos_label, label_ptr);
899 operands[pos_label] = dest_label;
900 output_asm_insn (buffer, operands);
901 return "";
902}
903
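/* Illustrative sketch, not from the original sources: given an
   already-inverted BRANCH_FORMAT such as "cbnz\tx0, " and a label prefix
   DEST of "Lcb", the emitted sequence is roughly

       cbnz    x0, .LcbN
       b       <original far destination>
     .LcbN:

   so the short-range conditional branch only has to reach the local label,
   while the unconditional branch covers the full +/-128 MiB range.  */
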
void
aarch64_err_no_fpadvsimd (machine_mode mode, const char *msg)
{
  const char *mc = FLOAT_MODE_P (mode) ? "floating-point" : "vector";
  if (TARGET_GENERAL_REGS_ONLY)
    error ("%qs is incompatible with %s %s", "-mgeneral-regs-only", mc, msg);
  else
    error ("%qs feature modifier is incompatible with %s %s", "+nofp", mc, msg);
}

/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
   The register allocator chooses ALL_REGS if FP_REGS and GENERAL_REGS have
   the same cost even if ALL_REGS has a much larger cost.  ALL_REGS is also
   used if the cost of both FP_REGS and GENERAL_REGS is lower than the memory
   cost (in this case the best class is the lowest cost one).  Using ALL_REGS
   irrespectively of its cost results in bad allocations with many redundant
   int<->FP moves which are expensive on various cores.
   To avoid this we don't allow ALL_REGS as the allocno class, but force a
   decision between FP_REGS and GENERAL_REGS.  We use the allocno class if it
   isn't ALL_REGS.  Similarly, use the best class if it isn't ALL_REGS.
   Otherwise set the allocno class depending on the mode.
   The result of this is that it is no longer inefficient to have a higher
   memory move cost than the register move cost.
*/

static reg_class_t
aarch64_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class,
                                         reg_class_t best_class)
{
  enum machine_mode mode;

  if (allocno_class != ALL_REGS)
    return allocno_class;

  if (best_class != ALL_REGS)
    return best_class;

  mode = PSEUDO_REGNO_MODE (regno);
  return FLOAT_MODE_P (mode) || VECTOR_MODE_P (mode) ? FP_REGS : GENERAL_REGS;
}

static unsigned int
aarch64_min_divisions_for_recip_mul (enum machine_mode mode)
{
  if (GET_MODE_UNIT_SIZE (mode) == 4)
    return aarch64_tune_params.min_div_recip_mul_sf;
  return aarch64_tune_params.min_div_recip_mul_df;
}

static int
aarch64_reassociation_width (unsigned opc ATTRIBUTE_UNUSED,
                             enum machine_mode mode)
{
  if (VECTOR_MODE_P (mode))
    return aarch64_tune_params.vec_reassoc_width;
  if (INTEGRAL_MODE_P (mode))
    return aarch64_tune_params.int_reassoc_width;
  if (FLOAT_MODE_P (mode))
    return aarch64_tune_params.fp_reassoc_width;
  return 1;
}

/* Provide a mapping from gcc register numbers to dwarf register numbers.  */
unsigned
aarch64_dbx_register_number (unsigned regno)
{
  if (GP_REGNUM_P (regno))
    return AARCH64_DWARF_R0 + regno - R0_REGNUM;
  else if (regno == SP_REGNUM)
    return AARCH64_DWARF_SP;
  else if (FP_REGNUM_P (regno))
    return AARCH64_DWARF_V0 + regno - V0_REGNUM;

  /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
     equivalent DWARF register.  */
  return DWARF_FRAME_REGISTERS;
}

/* Return TRUE if MODE is any of the large INT modes.  */
static bool
aarch64_vect_struct_mode_p (machine_mode mode)
{
  return mode == OImode || mode == CImode || mode == XImode;
}

/* Return TRUE if MODE is any of the vector modes.  */
static bool
aarch64_vector_mode_p (machine_mode mode)
{
  return aarch64_vector_mode_supported_p (mode)
         || aarch64_vect_struct_mode_p (mode);
}

/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P.  */
static bool
aarch64_array_mode_supported_p (machine_mode mode,
                                unsigned HOST_WIDE_INT nelems)
{
  if (TARGET_SIMD
      && (AARCH64_VALID_SIMD_QREG_MODE (mode)
          || AARCH64_VALID_SIMD_DREG_MODE (mode))
      && (nelems >= 2 && nelems <= 4))
    return true;

  return false;
}

/* Implement HARD_REGNO_NREGS.  */

int
aarch64_hard_regno_nregs (unsigned regno, machine_mode mode)
{
  switch (aarch64_regno_regclass (regno))
    {
    case FP_REGS:
    case FP_LO_REGS:
      return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
    default:
      return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
    }
  gcc_unreachable ();
}

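/* Worked example for the above, added editorially: with UNITS_PER_VREG of 16
   and UNITS_PER_WORD of 8, a 32-byte OImode value needs two FP registers,
   while a 16-byte TImode value needs one FP register but two GP registers.  */
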
/* Implement HARD_REGNO_MODE_OK.  */

int
aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return regno == CC_REGNUM;

  if (regno == SP_REGNUM)
    /* The purpose of comparing with ptr_mode is to support the
       global register variable associated with the stack pointer
       register via the syntax of asm ("wsp") in ILP32.  */
    return mode == Pmode || mode == ptr_mode;

  if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)
    return mode == Pmode;

  if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
    return 1;

  if (FP_REGNUM_P (regno))
    {
      if (aarch64_vect_struct_mode_p (mode))
        return
          (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
      else
        return 1;
    }

  return 0;
}

/* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
machine_mode
aarch64_hard_regno_caller_save_mode (unsigned regno, unsigned nregs,
                                     machine_mode mode)
{
  /* Handle modes that fit within single registers.  */
  if (nregs == 1 && GET_MODE_SIZE (mode) <= 16)
    {
      if (GET_MODE_SIZE (mode) >= 4)
        return mode;
      else
        return SImode;
    }
  /* Fall back to generic for multi-reg and very large modes.  */
  else
    return choose_hard_reg_mode (regno, nregs, false);
}

/* Return true if calls to DECL should be treated as
   long-calls (ie called via a register).  */
static bool
aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
{
  return false;
}

/* Return true if calls to symbol-ref SYM should be treated as
   long-calls (ie called via a register).  */
bool
aarch64_is_long_call_p (rtx sym)
{
  return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
}

/* Return true if calls to symbol-ref SYM should not go through
   plt stubs.  */

bool
aarch64_is_noplt_call_p (rtx sym)
{
  const_tree decl = SYMBOL_REF_DECL (sym);

  if (flag_pic
      && decl
      && (!flag_plt
          || lookup_attribute ("noplt", DECL_ATTRIBUTES (decl)))
      && !targetm.binds_local_p (decl))
    return true;

  return false;
}

/* Return true if the offsets to a zero/sign-extract operation
   represent an expression that matches an extend operation.  The
   operands represent the parameters from

   (extract:MODE (mult (reg) (MULT_IMM)) (EXTRACT_IMM) (const_int 0)).  */
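/* Editorial example of the intent (not from the original sources): with
   EXTRACT_IMM of 34 and MULT_IMM of 4, the pattern extracts the low 34 bits
   of (reg * 4), which is how combine canonicalizes a zero/sign-extended
   32-bit register shifted left by 2, i.e. the "uxtw #2"/"sxtw #2"
   addressing forms.  */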
bool
aarch64_is_extend_from_extract (machine_mode mode, rtx mult_imm,
                                rtx extract_imm)
{
  HOST_WIDE_INT mult_val, extract_val;

  if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
    return false;

  mult_val = INTVAL (mult_imm);
  extract_val = INTVAL (extract_imm);

  if (extract_val > 8
      && extract_val < GET_MODE_BITSIZE (mode)
      && exact_log2 (extract_val & ~7) > 0
      && (extract_val & 7) <= 4
      && mult_val == (1 << (extract_val & 7)))
    return true;

  return false;
}

/* Emit an insn that's a simple single-set.  Both the operands must be
   known to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (x, y));
}

/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for register 0 in the proper mode.  */
rtx
aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
{
  machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);

  emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
  return cc_reg;
}

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

rtx
aarch64_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Return the TLS model to use for ADDR.  */

static enum tls_model
tls_symbolic_operand_type (rtx addr)
{
  enum tls_model tls_kind = TLS_MODEL_NONE;
  rtx sym, addend;

  if (GET_CODE (addr) == CONST)
    {
      split_const (addr, &sym, &addend);
      if (GET_CODE (sym) == SYMBOL_REF)
        tls_kind = SYMBOL_REF_TLS_MODEL (sym);
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    tls_kind = SYMBOL_REF_TLS_MODEL (addr);

  return tls_kind;
}

/* We'll allow lo_sum's in addresses in our legitimate addresses
   so that combine would take care of combining addresses where
   necessary, but for generation purposes, we'll generate the address
   as :
   RTL                                Absolute
   tmp = hi (symbol_ref);             adrp  x1, foo
   dest = lo_sum (tmp, symbol_ref);   add   dest, x1, :lo_12:foo
                                      nop

   PIC                                TLS
   adrp x1, :got:foo                  adrp  tmp, :tlsgd:foo
   ldr  x1, [:got_lo12:foo]           add   dest, tmp, :tlsgd_lo12:foo
                                      bl    __tls_get_addr
                                      nop

   Load TLS symbol, depending on TLS mechanism and TLS access model.

   Global Dynamic - Traditional TLS:
   adrp tmp, :tlsgd:imm
   add  dest, tmp, #:tlsgd_lo12:imm
   bl   __tls_get_addr

   Global Dynamic - TLS Descriptors:
   adrp dest, :tlsdesc:imm
   ldr  tmp, [dest, #:tlsdesc_lo12:imm]
   add  dest, dest, #:tlsdesc_lo12:imm
   blr  tmp
   mrs  tp, tpidr_el0
   add  dest, dest, tp

   Initial Exec:
   mrs  tp, tpidr_el0
   adrp tmp, :gottprel:imm
   ldr  dest, [tmp, #:gottprel_lo12:imm]
   add  dest, dest, tp

   Local Exec:
   mrs  tp, tpidr_el0
   add  t0, tp, #:tprel_hi12:imm, lsl #12
   add  t0, t0, #:tprel_lo12_nc:imm
*/

static void
aarch64_load_symref_appropriately (rtx dest, rtx imm,
                                   enum aarch64_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_SMALL_ABSOLUTE:
      {
        /* In ILP32, the mode of dest can be either SImode or DImode.  */
        rtx tmp_reg = dest;
        machine_mode mode = GET_MODE (dest);

        gcc_assert (mode == Pmode || mode == ptr_mode);

        if (can_create_pseudo_p ())
          tmp_reg = gen_reg_rtx (mode);

        emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
        emit_insn (gen_add_losym (dest, tmp_reg, imm));
        return;
      }

    case SYMBOL_TINY_ABSOLUTE:
      emit_insn (gen_rtx_SET (dest, imm));
      return;

    case SYMBOL_SMALL_GOT_28K:
      {
        machine_mode mode = GET_MODE (dest);
        rtx gp_rtx = pic_offset_table_rtx;
        rtx insn;
        rtx mem;

        /* NOTE: pic_offset_table_rtx can be NULL_RTX, because we can reach
           here before rtl expand.  Tree IVOPT will generate rtl pattern to
           decide rtx costs, in which case pic_offset_table_rtx is not
           initialized.  For that case no need to generate the first adrp
           instruction as the final cost for global variable access is
           one instruction.  */
        if (gp_rtx != NULL)
          {
            /* -fpic for -mcmodel=small allows a 32K GOT table size (but we
               are using the page base as GOT base, the first page may be
               wasted, in the worst scenario, there is only 28K space for
               the GOT).

               The generated instruction sequence for accessing a global
               variable is:

                 ldr reg, [pic_offset_table_rtx, #:gotpage_lo15:sym]

               Only one instruction is needed.  But we must initialize
               pic_offset_table_rtx properly.  We generate the
               initialization insn for every global access, and allow CSE
               to remove all redundant ones.

               The final instruction sequence will look like the following
               for multiple global variable accesses:

                 adrp pic_offset_table_rtx, _GLOBAL_OFFSET_TABLE_

                 ldr reg, [pic_offset_table_rtx, #:gotpage_lo15:sym1]
                 ldr reg, [pic_offset_table_rtx, #:gotpage_lo15:sym2]
                 ldr reg, [pic_offset_table_rtx, #:gotpage_lo15:sym3]
                 ...  */

            rtx s = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
            crtl->uses_pic_offset_table = 1;
            emit_move_insn (gp_rtx, gen_rtx_HIGH (Pmode, s));

            if (mode != GET_MODE (gp_rtx))
              gp_rtx = simplify_gen_subreg (mode, gp_rtx, GET_MODE (gp_rtx), 0);
          }

        if (mode == ptr_mode)
          {
            if (mode == DImode)
              insn = gen_ldr_got_small_28k_di (dest, gp_rtx, imm);
            else
              insn = gen_ldr_got_small_28k_si (dest, gp_rtx, imm);

            mem = XVECEXP (SET_SRC (insn), 0, 0);
          }
        else
          {
            gcc_assert (mode == Pmode);

            insn = gen_ldr_got_small_28k_sidi (dest, gp_rtx, imm);
            mem = XVECEXP (XEXP (SET_SRC (insn), 0), 0, 0);
          }

        /* The operand is expected to be MEM.  Whenever the related insn
           pattern changed, above code which calculate mem should be
           updated.  */
        gcc_assert (GET_CODE (mem) == MEM);
        MEM_READONLY_P (mem) = 1;
        MEM_NOTRAP_P (mem) = 1;
        emit_insn (insn);
        return;
      }

    case SYMBOL_SMALL_GOT_4G:
      {
        /* In ILP32, the mode of dest can be either SImode or DImode,
           while the got entry is always of SImode size.  The mode of
           dest depends on how dest is used: if dest is assigned to a
           pointer (e.g. in the memory), it has SImode; it may have
           DImode if dest is dereferenced to access the memory.
           This is why we have to handle three different ldr_got_small
           patterns here (two patterns for ILP32).  */

        rtx insn;
        rtx mem;
        rtx tmp_reg = dest;
        machine_mode mode = GET_MODE (dest);

        if (can_create_pseudo_p ())
          tmp_reg = gen_reg_rtx (mode);

        emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
        if (mode == ptr_mode)
          {
            if (mode == DImode)
              insn = gen_ldr_got_small_di (dest, tmp_reg, imm);
            else
              insn = gen_ldr_got_small_si (dest, tmp_reg, imm);

            mem = XVECEXP (SET_SRC (insn), 0, 0);
          }
        else
          {
            gcc_assert (mode == Pmode);

            insn = gen_ldr_got_small_sidi (dest, tmp_reg, imm);
            mem = XVECEXP (XEXP (SET_SRC (insn), 0), 0, 0);
          }

        gcc_assert (GET_CODE (mem) == MEM);
        MEM_READONLY_P (mem) = 1;
        MEM_NOTRAP_P (mem) = 1;
        emit_insn (insn);
        return;
      }

    case SYMBOL_SMALL_TLSGD:
      {
        rtx_insn *insns;
        rtx result = gen_rtx_REG (Pmode, R0_REGNUM);

        start_sequence ();
        aarch64_emit_call_insn (gen_tlsgd_small (result, imm));
        insns = get_insns ();
        end_sequence ();

        RTL_CONST_CALL_P (insns) = 1;
        emit_libcall_block (insns, dest, result, imm);
        return;
      }

    case SYMBOL_SMALL_TLSDESC:
      {
        machine_mode mode = GET_MODE (dest);
        rtx x0 = gen_rtx_REG (mode, R0_REGNUM);
        rtx tp;

        gcc_assert (mode == Pmode || mode == ptr_mode);

        /* In ILP32, the got entry is always of SImode size.  Unlike
           small GOT, the dest is fixed at reg 0.  */
        if (TARGET_ILP32)
          emit_insn (gen_tlsdesc_small_si (imm));
        else
          emit_insn (gen_tlsdesc_small_di (imm));
        tp = aarch64_load_tp (NULL);

        if (mode != Pmode)
          tp = gen_lowpart (mode, tp);

        emit_insn (gen_rtx_SET (dest, gen_rtx_PLUS (mode, tp, x0)));
        set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
        return;
      }

    case SYMBOL_SMALL_TLSIE:
      {
        /* In ILP32, the mode of dest can be either SImode or DImode,
           while the got entry is always of SImode size.  The mode of
           dest depends on how dest is used: if dest is assigned to a
           pointer (e.g. in the memory), it has SImode; it may have
           DImode if dest is dereferenced to access the memory.
           This is why we have to handle three different tlsie_small
           patterns here (two patterns for ILP32).  */
        machine_mode mode = GET_MODE (dest);
        rtx tmp_reg = gen_reg_rtx (mode);
        rtx tp = aarch64_load_tp (NULL);

        if (mode == ptr_mode)
          {
            if (mode == DImode)
              emit_insn (gen_tlsie_small_di (tmp_reg, imm));
            else
              {
                emit_insn (gen_tlsie_small_si (tmp_reg, imm));
                tp = gen_lowpart (mode, tp);
              }
          }
        else
          {
            gcc_assert (mode == Pmode);
            emit_insn (gen_tlsie_small_sidi (tmp_reg, imm));
          }

        emit_insn (gen_rtx_SET (dest, gen_rtx_PLUS (mode, tp, tmp_reg)));
        set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
        return;
      }

    case SYMBOL_TLSLE12:
    case SYMBOL_TLSLE24:
    case SYMBOL_TLSLE32:
    case SYMBOL_TLSLE48:
      {
        machine_mode mode = GET_MODE (dest);
        rtx tp = aarch64_load_tp (NULL);

        if (mode != Pmode)
          tp = gen_lowpart (mode, tp);

        switch (type)
          {
          case SYMBOL_TLSLE12:
            emit_insn ((mode == DImode ? gen_tlsle12_di : gen_tlsle12_si)
                       (dest, tp, imm));
            break;
          case SYMBOL_TLSLE24:
            emit_insn ((mode == DImode ? gen_tlsle24_di : gen_tlsle24_si)
                       (dest, tp, imm));
            break;
          case SYMBOL_TLSLE32:
            emit_insn ((mode == DImode ? gen_tlsle32_di : gen_tlsle32_si)
                       (dest, imm));
            emit_insn ((mode == DImode ? gen_adddi3 : gen_addsi3)
                       (dest, dest, tp));
            break;
          case SYMBOL_TLSLE48:
            emit_insn ((mode == DImode ? gen_tlsle48_di : gen_tlsle48_si)
                       (dest, imm));
            emit_insn ((mode == DImode ? gen_adddi3 : gen_addsi3)
                       (dest, dest, tp));
            break;
          default:
            gcc_unreachable ();
          }

        set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
        return;
      }

    case SYMBOL_TINY_GOT:
      emit_insn (gen_ldr_got_tiny (dest, imm));
      return;

    case SYMBOL_TINY_TLSIE:
      {
        machine_mode mode = GET_MODE (dest);
        rtx tp = aarch64_load_tp (NULL);

        if (mode == ptr_mode)
          {
            if (mode == DImode)
              emit_insn (gen_tlsie_tiny_di (dest, imm, tp));
            else
              {
                tp = gen_lowpart (mode, tp);
                emit_insn (gen_tlsie_tiny_si (dest, imm, tp));
              }
          }
        else
          {
            gcc_assert (mode == Pmode);
            emit_insn (gen_tlsie_tiny_sidi (dest, imm, tp));
          }

        set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
        return;
      }

    default:
      gcc_unreachable ();
    }
}

/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */
static rtx
aarch64_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
          ? emit_move_insn (dest, src)
          : emit_move_insn_1 (dest, src));
}

/* Split a 128-bit move operation into two 64-bit move operations,
   taking care to handle partial overlap of register to register
   copies.  Special cases are needed when moving between GP regs and
   FP regs.  SRC can be a register, constant or memory; DST a register
   or memory.  If either operand is memory it must not have any side
   effects.  */
void
aarch64_split_128bit_move (rtx dst, rtx src)
{
  rtx dst_lo, dst_hi;
  rtx src_lo, src_hi;

  machine_mode mode = GET_MODE (dst);

  gcc_assert (mode == TImode || mode == TFmode);
  gcc_assert (!(side_effects_p (src) || side_effects_p (dst)));
  gcc_assert (mode == GET_MODE (src) || GET_MODE (src) == VOIDmode);

  if (REG_P (dst) && REG_P (src))
    {
      int src_regno = REGNO (src);
      int dst_regno = REGNO (dst);

      /* Handle FP <-> GP regs.  */
      if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
        {
          src_lo = gen_lowpart (word_mode, src);
          src_hi = gen_highpart (word_mode, src);

          if (mode == TImode)
            {
              emit_insn (gen_aarch64_movtilow_di (dst, src_lo));
              emit_insn (gen_aarch64_movtihigh_di (dst, src_hi));
            }
          else
            {
              emit_insn (gen_aarch64_movtflow_di (dst, src_lo));
              emit_insn (gen_aarch64_movtfhigh_di (dst, src_hi));
            }
          return;
        }
      else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
        {
          dst_lo = gen_lowpart (word_mode, dst);
          dst_hi = gen_highpart (word_mode, dst);

          if (mode == TImode)
            {
              emit_insn (gen_aarch64_movdi_tilow (dst_lo, src));
              emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src));
            }
          else
            {
              emit_insn (gen_aarch64_movdi_tflow (dst_lo, src));
              emit_insn (gen_aarch64_movdi_tfhigh (dst_hi, src));
            }
          return;
        }
    }

  dst_lo = gen_lowpart (word_mode, dst);
  dst_hi = gen_highpart (word_mode, dst);
  src_lo = gen_lowpart (word_mode, src);
  src_hi = gen_highpart_mode (word_mode, mode, src);

  /* At most one pairing may overlap.  */
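  /* Editorial illustration (not from the original sources): for a TImode
     copy from the register pair x0/x1 to the pair x1/x2, dst_lo is x1 and
     src_hi is also x1, so the high halves must be moved first; the overlap
     test below picks that ordering.  */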
  if (reg_overlap_mentioned_p (dst_lo, src_hi))
    {
      aarch64_emit_move (dst_hi, src_hi);
      aarch64_emit_move (dst_lo, src_lo);
    }
  else
    {
      aarch64_emit_move (dst_lo, src_lo);
      aarch64_emit_move (dst_hi, src_hi);
    }
}

bool
aarch64_split_128bit_move_p (rtx dst, rtx src)
{
  return (! REG_P (src)
          || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
}

/* Split a complex SIMD combine.  */

void
aarch64_split_simd_combine (rtx dst, rtx src1, rtx src2)
{
  machine_mode src_mode = GET_MODE (src1);
  machine_mode dst_mode = GET_MODE (dst);

  gcc_assert (VECTOR_MODE_P (dst_mode));

  if (REG_P (dst) && REG_P (src1) && REG_P (src2))
    {
      rtx (*gen) (rtx, rtx, rtx);

      switch (src_mode)
        {
        case V8QImode:
          gen = gen_aarch64_simd_combinev8qi;
          break;
        case V4HImode:
          gen = gen_aarch64_simd_combinev4hi;
          break;
        case V2SImode:
          gen = gen_aarch64_simd_combinev2si;
          break;
        case V4HFmode:
          gen = gen_aarch64_simd_combinev4hf;
          break;
        case V2SFmode:
          gen = gen_aarch64_simd_combinev2sf;
          break;
        case DImode:
          gen = gen_aarch64_simd_combinedi;
          break;
        case DFmode:
          gen = gen_aarch64_simd_combinedf;
          break;
        default:
          gcc_unreachable ();
        }

      emit_insn (gen (dst, src1, src2));
      return;
    }
}

/* Split a complex SIMD move.  */

void
aarch64_split_simd_move (rtx dst, rtx src)
{
  machine_mode src_mode = GET_MODE (src);
  machine_mode dst_mode = GET_MODE (dst);

  gcc_assert (VECTOR_MODE_P (dst_mode));

  if (REG_P (dst) && REG_P (src))
    {
      rtx (*gen) (rtx, rtx);

      gcc_assert (VECTOR_MODE_P (src_mode));

      switch (src_mode)
        {
        case V16QImode:
          gen = gen_aarch64_split_simd_movv16qi;
          break;
        case V8HImode:
          gen = gen_aarch64_split_simd_movv8hi;
          break;
        case V4SImode:
          gen = gen_aarch64_split_simd_movv4si;
          break;
        case V2DImode:
          gen = gen_aarch64_split_simd_movv2di;
          break;
        case V8HFmode:
          gen = gen_aarch64_split_simd_movv8hf;
          break;
        case V4SFmode:
          gen = gen_aarch64_split_simd_movv4sf;
          break;
        case V2DFmode:
          gen = gen_aarch64_split_simd_movv2df;
          break;
        default:
          gcc_unreachable ();
        }

      emit_insn (gen (dst, src));
      return;
    }
}

bool
aarch64_zero_extend_const_eq (machine_mode xmode, rtx x,
                              machine_mode ymode, rtx y)
{
  rtx r = simplify_const_unary_operation (ZERO_EXTEND, xmode, y, ymode);
  gcc_assert (r != NULL);
  return rtx_equal_p (x, r);
}


static rtx
aarch64_force_temporary (machine_mode mode, rtx x, rtx value)
{
  if (can_create_pseudo_p ())
    return force_reg (mode, value);
  else
    {
      x = aarch64_emit_move (x, value);
      return x;
    }
}


static rtx
aarch64_add_offset (machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!aarch64_plus_immediate (GEN_INT (offset), mode))
    {
      rtx high;
      /* Load the full offset into a register.  This
         might be improvable in the future.  */
      high = GEN_INT (offset);
      offset = 0;
      high = aarch64_force_temporary (mode, temp, high);
      reg = aarch64_force_temporary (mode, temp,
                                     gen_rtx_PLUS (mode, high, reg));
    }
  return plus_constant (mode, reg, offset);
}

static int
aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
                                machine_mode mode)
{
  int i;
  unsigned HOST_WIDE_INT val, val2, mask;
  int one_match, zero_match;
  int num_insns;

  val = INTVAL (imm);

  if (aarch64_move_imm (val, mode))
    {
      if (generate)
        emit_insn (gen_rtx_SET (dest, imm));
      return 1;
    }

  if ((val >> 32) == 0 || mode == SImode)
    {
      if (generate)
        {
          emit_insn (gen_rtx_SET (dest, GEN_INT (val & 0xffff)));
          if (mode == SImode)
            emit_insn (gen_insv_immsi (dest, GEN_INT (16),
                                       GEN_INT ((val >> 16) & 0xffff)));
          else
            emit_insn (gen_insv_immdi (dest, GEN_INT (16),
                                       GEN_INT ((val >> 16) & 0xffff)));
        }
      return 2;
    }

  /* Remaining cases are all for DImode.  */

  mask = 0xffff;
  zero_match = ((val & mask) == 0) + ((val & (mask << 16)) == 0) +
    ((val & (mask << 32)) == 0) + ((val & (mask << 48)) == 0);
  one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
    ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);

  if (zero_match != 2 && one_match != 2)
    {
      /* Try emitting a bitmask immediate with a movk replacing 16 bits.
         For a 64-bit bitmask try whether changing 16 bits to all ones or
         zeroes creates a valid bitmask.  To check any repeated bitmask,
         try using 16 bits from the other 32-bit half of val.  */
43e9d192 1801
62c8d76c 1802 for (i = 0; i < 64; i += 16, mask <<= 16)
43e9d192 1803 {
62c8d76c
WD
1804 val2 = val & ~mask;
1805 if (val2 != val && aarch64_bitmask_imm (val2, mode))
1806 break;
1807 val2 = val | mask;
1808 if (val2 != val && aarch64_bitmask_imm (val2, mode))
1809 break;
1810 val2 = val2 & ~mask;
1811 val2 = val2 | (((val2 >> 32) | (val2 << 32)) & mask);
1812 if (val2 != val && aarch64_bitmask_imm (val2, mode))
1813 break;
43e9d192 1814 }
62c8d76c 1815 if (i != 64)
43e9d192 1816 {
62c8d76c 1817 if (generate)
43e9d192 1818 {
62c8d76c
WD
1819 emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
1820 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
9a4865db 1821 GEN_INT ((val >> i) & 0xffff)));
43e9d192 1822 }
1312b1ba 1823 return 2;
43e9d192
IB
1824 }
1825 }
1826
9a4865db
WD
1827 /* Generate 2-4 instructions, skipping 16 bits of all zeroes or ones which
1828 are emitted by the initial mov. If one_match > zero_match, skip set bits,
1829 otherwise skip zero bits. */
2c274197 1830
9a4865db 1831 num_insns = 1;
43e9d192 1832 mask = 0xffff;
9a4865db
WD
1833 val2 = one_match > zero_match ? ~val : val;
1834 i = (val2 & mask) != 0 ? 0 : (val2 & (mask << 16)) != 0 ? 16 : 32;
1835
1836 if (generate)
1837 emit_insn (gen_rtx_SET (dest, GEN_INT (one_match > zero_match
1838 ? (val | ~(mask << i))
1839 : (val & (mask << i)))));
1840 for (i += 16; i < 64; i += 16)
43e9d192 1841 {
9a4865db
WD
1842 if ((val2 & (mask << i)) == 0)
1843 continue;
1844 if (generate)
1845 emit_insn (gen_insv_immdi (dest, GEN_INT (i),
1846 GEN_INT ((val >> i) & 0xffff)));
1847 num_insns ++;
82614948
RR
1848 }
1849
1850 return num_insns;
1851}
1852
1853
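/* Expand a move of the constant IMM (an integer, symbol, label or CONST
 expression) into register DEST of mode SImode or DImode. */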
1854void
1855aarch64_expand_mov_immediate (rtx dest, rtx imm)
1856{
1857 machine_mode mode = GET_MODE (dest);
1858
1859 gcc_assert (mode == SImode || mode == DImode);
1860
1861 /* Check on what type of symbol it is. */
1862 if (GET_CODE (imm) == SYMBOL_REF
1863 || GET_CODE (imm) == LABEL_REF
1864 || GET_CODE (imm) == CONST)
1865 {
1866 rtx mem, base, offset;
1867 enum aarch64_symbol_type sty;
1868
1869 /* If we have (const (plus symbol offset)), separate out the offset
1870 before we start classifying the symbol. */
1871 split_const (imm, &base, &offset);
1872
a6e0bfa7 1873 sty = aarch64_classify_symbol (base, offset);
82614948
RR
1874 switch (sty)
1875 {
1876 case SYMBOL_FORCE_TO_MEM:
1877 if (offset != const0_rtx
1878 && targetm.cannot_force_const_mem (mode, imm))
1879 {
1880 gcc_assert (can_create_pseudo_p ());
1881 base = aarch64_force_temporary (mode, dest, base);
1882 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
1883 aarch64_emit_move (dest, base);
1884 return;
1885 }
b4f50fd4 1886
82614948
RR
1887 mem = force_const_mem (ptr_mode, imm);
1888 gcc_assert (mem);
b4f50fd4
RR
1889
1890 /* If we aren't generating PC relative literals, then
1891 we need to expand the literal pool access carefully.
1892 This is something that needs to be done in a number
1893 of places, so could well live as a separate function. */
9ee6540a 1894 if (!aarch64_pcrelative_literal_loads)
b4f50fd4
RR
1895 {
1896 gcc_assert (can_create_pseudo_p ());
1897 base = gen_reg_rtx (ptr_mode);
1898 aarch64_expand_mov_immediate (base, XEXP (mem, 0));
1899 mem = gen_rtx_MEM (ptr_mode, base);
1900 }
1901
82614948
RR
1902 if (mode != ptr_mode)
1903 mem = gen_rtx_ZERO_EXTEND (mode, mem);
b4f50fd4 1904
f7df4a84 1905 emit_insn (gen_rtx_SET (dest, mem));
b4f50fd4 1906
82614948
RR
1907 return;
1908
1909 case SYMBOL_SMALL_TLSGD:
1910 case SYMBOL_SMALL_TLSDESC:
79496620 1911 case SYMBOL_SMALL_TLSIE:
1b1e81f8 1912 case SYMBOL_SMALL_GOT_28K:
6642bdb4 1913 case SYMBOL_SMALL_GOT_4G:
82614948 1914 case SYMBOL_TINY_GOT:
5ae7caad 1915 case SYMBOL_TINY_TLSIE:
82614948
RR
1916 if (offset != const0_rtx)
1917 {
1918 gcc_assert(can_create_pseudo_p ());
1919 base = aarch64_force_temporary (mode, dest, base);
1920 base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
1921 aarch64_emit_move (dest, base);
1922 return;
1923 }
1924 /* FALLTHRU */
1925
82614948
RR
1926 case SYMBOL_SMALL_ABSOLUTE:
1927 case SYMBOL_TINY_ABSOLUTE:
cbf5629e 1928 case SYMBOL_TLSLE12:
d18ba284 1929 case SYMBOL_TLSLE24:
cbf5629e
JW
1930 case SYMBOL_TLSLE32:
1931 case SYMBOL_TLSLE48:
82614948
RR
1932 aarch64_load_symref_appropriately (dest, imm, sty);
1933 return;
1934
1935 default:
1936 gcc_unreachable ();
1937 }
1938 }
1939
1940 if (!CONST_INT_P (imm))
1941 {
1942 if (GET_CODE (imm) == HIGH)
f7df4a84 1943 emit_insn (gen_rtx_SET (dest, imm));
82614948
RR
1944 else
1945 {
1946 rtx mem = force_const_mem (mode, imm);
1947 gcc_assert (mem);
f7df4a84 1948 emit_insn (gen_rtx_SET (dest, mem));
43e9d192 1949 }
82614948
RR
1950
1951 return;
43e9d192 1952 }
82614948
RR
1953
1954 aarch64_internal_mov_immediate (dest, imm, true, GET_MODE (dest));
43e9d192
IB
1955}
1956
5be6b295
WD
1957/* Add DELTA to REGNUM in mode MODE. SCRATCHREG can be used to hold a
1958 temporary value if necessary. FRAME_RELATED_P should be true if
1959 the RTX_FRAME_RELATED flag should be set and CFA adjustments added
1960 to the generated instructions. If SCRATCHREG is known to hold
1961 abs (delta), EMIT_MOVE_IMM can be set to false to avoid emitting the
1962 immediate again.
1963
1964 Since this function may be used to adjust the stack pointer, we must
1965 ensure that it cannot cause transient stack deallocation (for example
1966 by first incrementing SP and then decrementing when adjusting by a
1967 large immediate). */
c4ddc43a
JW
1968
1969static void
5be6b295
WD
1970aarch64_add_constant_internal (machine_mode mode, int regnum, int scratchreg,
1971 HOST_WIDE_INT delta, bool frame_related_p,
1972 bool emit_move_imm)
c4ddc43a
JW
1973{
1974 HOST_WIDE_INT mdelta = abs_hwi (delta);
1975 rtx this_rtx = gen_rtx_REG (mode, regnum);
37d6a4b7 1976 rtx_insn *insn;
c4ddc43a 1977
c4ddc43a
JW
1978 if (!mdelta)
1979 return;
1980
5be6b295 1981 /* Single instruction adjustment. */
c4ddc43a
JW
1982 if (aarch64_uimm12_shift (mdelta))
1983 {
37d6a4b7
JW
1984 insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta)));
1985 RTX_FRAME_RELATED_P (insn) = frame_related_p;
c4ddc43a
JW
1986 return;
1987 }
1988
5be6b295
WD
 1989 /* Emit 2 additions/subtractions if the adjustment is less than 24 bits.
 1990 Only do this if mdelta cannot be loaded with a single move immediate,
 1991 as adjusting using a move is better in that case. */
1992 if (mdelta < 0x1000000 && !aarch64_move_imm (mdelta, mode))
c4ddc43a
JW
1993 {
1994 HOST_WIDE_INT low_off = mdelta & 0xfff;
1995
1996 low_off = delta < 0 ? -low_off : low_off;
37d6a4b7
JW
1997 insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (low_off)));
1998 RTX_FRAME_RELATED_P (insn) = frame_related_p;
1999 insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta - low_off)));
2000 RTX_FRAME_RELATED_P (insn) = frame_related_p;
c4ddc43a
JW
2001 return;
2002 }
2003
5be6b295 2004 /* Emit a move immediate if required and an addition/subtraction. */
c4ddc43a 2005 rtx scratch_rtx = gen_rtx_REG (mode, scratchreg);
5be6b295
WD
2006 if (emit_move_imm)
2007 aarch64_internal_mov_immediate (scratch_rtx, GEN_INT (mdelta), true, mode);
2008 insn = emit_insn (delta < 0 ? gen_sub2_insn (this_rtx, scratch_rtx)
2009 : gen_add2_insn (this_rtx, scratch_rtx));
37d6a4b7
JW
2010 if (frame_related_p)
2011 {
2012 RTX_FRAME_RELATED_P (insn) = frame_related_p;
2013 rtx adj = plus_constant (mode, this_rtx, delta);
2014 add_reg_note (insn , REG_CFA_ADJUST_CFA, gen_rtx_SET (this_rtx, adj));
2015 }
c4ddc43a
JW
2016}
2017
5be6b295
WD
2018static inline void
2019aarch64_add_constant (machine_mode mode, int regnum, int scratchreg,
2020 HOST_WIDE_INT delta)
2021{
2022 aarch64_add_constant_internal (mode, regnum, scratchreg, delta, false, true);
2023}
2024
2025static inline void
2026aarch64_add_sp (int scratchreg, HOST_WIDE_INT delta, bool emit_move_imm)
2027{
2028 aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, delta,
2029 true, emit_move_imm);
2030}
2031
2032static inline void
2033aarch64_sub_sp (int scratchreg, HOST_WIDE_INT delta, bool frame_related_p)
2034{
2035 aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, -delta,
2036 frame_related_p, true);
2037}
2038
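/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */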
43e9d192 2039static bool
fee9ba42
JW
2040aarch64_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
2041 tree exp ATTRIBUTE_UNUSED)
43e9d192 2042{
fee9ba42 2043 /* Currently, always true. */
43e9d192
IB
2044 return true;
2045}
2046
2047/* Implement TARGET_PASS_BY_REFERENCE. */
2048
2049static bool
2050aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
ef4bddc2 2051 machine_mode mode,
43e9d192
IB
2052 const_tree type,
2053 bool named ATTRIBUTE_UNUSED)
2054{
2055 HOST_WIDE_INT size;
ef4bddc2 2056 machine_mode dummymode;
43e9d192
IB
2057 int nregs;
2058
2059 /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
2060 size = (mode == BLKmode && type)
2061 ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2062
aadc1c43
MHD
2063 /* Aggregates are passed by reference based on their size. */
2064 if (type && AGGREGATE_TYPE_P (type))
43e9d192 2065 {
aadc1c43 2066 size = int_size_in_bytes (type);
43e9d192
IB
2067 }
2068
 2069 /* Variable sized arguments are always passed by reference. */
2070 if (size < 0)
2071 return true;
2072
2073 /* Can this be a candidate to be passed in fp/simd register(s)? */
2074 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
2075 &dummymode, &nregs,
2076 NULL))
2077 return false;
2078
 2079 /* Arguments which are variable sized or larger than 2 registers are
 2080 passed by reference unless they form a homogeneous floating-point
 2081 aggregate. */
2082 return size > 2 * UNITS_PER_WORD;
2083}
2084
2085/* Return TRUE if VALTYPE is padded to its least significant bits. */
2086static bool
2087aarch64_return_in_msb (const_tree valtype)
2088{
ef4bddc2 2089 machine_mode dummy_mode;
43e9d192
IB
2090 int dummy_int;
2091
2092 /* Never happens in little-endian mode. */
2093 if (!BYTES_BIG_ENDIAN)
2094 return false;
2095
2096 /* Only composite types smaller than or equal to 16 bytes can
2097 be potentially returned in registers. */
2098 if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
2099 || int_size_in_bytes (valtype) <= 0
2100 || int_size_in_bytes (valtype) > 16)
2101 return false;
2102
2103 /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
2104 or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
2105 is always passed/returned in the least significant bits of fp/simd
2106 register(s). */
2107 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
2108 &dummy_mode, &dummy_int, NULL))
2109 return false;
2110
2111 return true;
2112}
2113
2114/* Implement TARGET_FUNCTION_VALUE.
2115 Define how to find the value returned by a function. */
2116
2117static rtx
2118aarch64_function_value (const_tree type, const_tree func,
2119 bool outgoing ATTRIBUTE_UNUSED)
2120{
ef4bddc2 2121 machine_mode mode;
43e9d192
IB
2122 int unsignedp;
2123 int count;
ef4bddc2 2124 machine_mode ag_mode;
43e9d192
IB
2125
2126 mode = TYPE_MODE (type);
2127 if (INTEGRAL_TYPE_P (type))
2128 mode = promote_function_mode (type, mode, &unsignedp, func, 1);
2129
2130 if (aarch64_return_in_msb (type))
2131 {
2132 HOST_WIDE_INT size = int_size_in_bytes (type);
2133
2134 if (size % UNITS_PER_WORD != 0)
2135 {
2136 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2137 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2138 }
2139 }
2140
2141 if (aarch64_vfp_is_call_or_return_candidate (mode, type,
2142 &ag_mode, &count, NULL))
2143 {
2144 if (!aarch64_composite_type_p (type, mode))
2145 {
2146 gcc_assert (count == 1 && mode == ag_mode);
2147 return gen_rtx_REG (mode, V0_REGNUM);
2148 }
2149 else
2150 {
2151 int i;
2152 rtx par;
2153
2154 par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
2155 for (i = 0; i < count; i++)
2156 {
2157 rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
2158 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
2159 GEN_INT (i * GET_MODE_SIZE (ag_mode)));
2160 XVECEXP (par, 0, i) = tmp;
2161 }
2162 return par;
2163 }
2164 }
2165 else
2166 return gen_rtx_REG (mode, R0_REGNUM);
2167}
2168
2169/* Implements TARGET_FUNCTION_VALUE_REGNO_P.
2170 Return true if REGNO is the number of a hard register in which the values
2171 of called function may come back. */
2172
2173static bool
2174aarch64_function_value_regno_p (const unsigned int regno)
2175{
2176 /* Maximum of 16 bytes can be returned in the general registers. Examples
2177 of 16-byte return values are: 128-bit integers and 16-byte small
2178 structures (excluding homogeneous floating-point aggregates). */
2179 if (regno == R0_REGNUM || regno == R1_REGNUM)
2180 return true;
2181
2182 /* Up to four fp/simd registers can return a function value, e.g. a
2183 homogeneous floating-point aggregate having four members. */
2184 if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
d5726973 2185 return TARGET_FLOAT;
43e9d192
IB
2186
2187 return false;
2188}
2189
2190/* Implement TARGET_RETURN_IN_MEMORY.
2191
2192 If the type T of the result of a function is such that
2193 void func (T arg)
2194 would require that arg be passed as a value in a register (or set of
2195 registers) according to the parameter passing rules, then the result
2196 is returned in the same registers as would be used for such an
2197 argument. */
2198
2199static bool
2200aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
2201{
2202 HOST_WIDE_INT size;
ef4bddc2 2203 machine_mode ag_mode;
43e9d192
IB
2204 int count;
2205
2206 if (!AGGREGATE_TYPE_P (type)
2207 && TREE_CODE (type) != COMPLEX_TYPE
2208 && TREE_CODE (type) != VECTOR_TYPE)
 2209 /* Simple scalar types are always returned in registers. */
2210 return false;
2211
2212 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
2213 type,
2214 &ag_mode,
2215 &count,
2216 NULL))
2217 return false;
2218
 2219 /* Types larger than 2 registers are returned in memory. */
2220 size = int_size_in_bytes (type);
2221 return (size < 0 || size > 2 * UNITS_PER_WORD);
2222}
2223
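/* Return true if an argument of mode MODE and type TYPE is a candidate for
 passing in SIMD/FP registers, setting *NREGS to the number of registers
 needed and recording the element mode in the cumulative argument state. */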
2224static bool
ef4bddc2 2225aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, machine_mode mode,
43e9d192
IB
2226 const_tree type, int *nregs)
2227{
2228 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
2229 return aarch64_vfp_is_call_or_return_candidate (mode,
2230 type,
2231 &pcum->aapcs_vfp_rmode,
2232 nregs,
2233 NULL);
2234}
2235
2236/* Given MODE and TYPE of a function argument, return the alignment in
2237 bits. The idea is to suppress any stronger alignment requested by
2238 the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
2239 This is a helper function for local use only. */
2240
2241static unsigned int
ef4bddc2 2242aarch64_function_arg_alignment (machine_mode mode, const_tree type)
43e9d192 2243{
75d6cc81
AL
2244 if (!type)
2245 return GET_MODE_ALIGNMENT (mode);
2246 if (integer_zerop (TYPE_SIZE (type)))
2247 return 0;
43e9d192 2248
75d6cc81
AL
2249 gcc_assert (TYPE_MODE (type) == mode);
2250
2251 if (!AGGREGATE_TYPE_P (type))
2252 return TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
2253
2254 if (TREE_CODE (type) == ARRAY_TYPE)
2255 return TYPE_ALIGN (TREE_TYPE (type));
2256
2257 unsigned int alignment = 0;
2258
2259 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2260 alignment = std::max (alignment, DECL_ALIGN (field));
43e9d192
IB
2261
2262 return alignment;
2263}
2264
2265/* Layout a function argument according to the AAPCS64 rules. The rule
2266 numbers refer to the rule numbers in the AAPCS64. */
2267
2268static void
ef4bddc2 2269aarch64_layout_arg (cumulative_args_t pcum_v, machine_mode mode,
43e9d192
IB
2270 const_tree type,
2271 bool named ATTRIBUTE_UNUSED)
2272{
2273 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
2274 int ncrn, nvrn, nregs;
2275 bool allocate_ncrn, allocate_nvrn;
3abf17cf 2276 HOST_WIDE_INT size;
43e9d192
IB
2277
2278 /* We need to do this once per argument. */
2279 if (pcum->aapcs_arg_processed)
2280 return;
2281
2282 pcum->aapcs_arg_processed = true;
2283
3abf17cf
YZ
2284 /* Size in bytes, rounded to the nearest multiple of 8 bytes. */
2285 size
4f59f9f2
UB
2286 = ROUND_UP (type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode),
2287 UNITS_PER_WORD);
3abf17cf 2288
43e9d192
IB
2289 allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
2290 allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
2291 mode,
2292 type,
2293 &nregs);
2294
 2295 /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
2296 The following code thus handles passing by SIMD/FP registers first. */
2297
2298 nvrn = pcum->aapcs_nvrn;
2299
 2300 /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
 2301 and homogeneous short-vector aggregates (HVA). */
2302 if (allocate_nvrn)
2303 {
261fb553
AL
2304 if (!TARGET_FLOAT)
2305 aarch64_err_no_fpadvsimd (mode, "argument");
2306
43e9d192
IB
2307 if (nvrn + nregs <= NUM_FP_ARG_REGS)
2308 {
2309 pcum->aapcs_nextnvrn = nvrn + nregs;
2310 if (!aarch64_composite_type_p (type, mode))
2311 {
2312 gcc_assert (nregs == 1);
2313 pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
2314 }
2315 else
2316 {
2317 rtx par;
2318 int i;
2319 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
2320 for (i = 0; i < nregs; i++)
2321 {
2322 rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
2323 V0_REGNUM + nvrn + i);
2324 tmp = gen_rtx_EXPR_LIST
2325 (VOIDmode, tmp,
2326 GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
2327 XVECEXP (par, 0, i) = tmp;
2328 }
2329 pcum->aapcs_reg = par;
2330 }
2331 return;
2332 }
2333 else
2334 {
2335 /* C.3 NSRN is set to 8. */
2336 pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
2337 goto on_stack;
2338 }
2339 }
2340
2341 ncrn = pcum->aapcs_ncrn;
3abf17cf 2342 nregs = size / UNITS_PER_WORD;
43e9d192
IB
2343
 2344 /* C6 - C9, though the sign and zero extension semantics are
 2345 handled elsewhere. This is the case where the argument fits
 2346 entirely in general registers. */
2347 if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
2348 {
2349 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
2350
2351 gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
2352
2353 /* C.8 if the argument has an alignment of 16 then the NGRN is
2354 rounded up to the next even number. */
2355 if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
2356 {
2357 ++ncrn;
2358 gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
2359 }
2360 /* NREGS can be 0 when e.g. an empty structure is to be passed.
2361 A reg is still generated for it, but the caller should be smart
2362 enough not to use it. */
2363 if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
2364 {
2365 pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
2366 }
2367 else
2368 {
2369 rtx par;
2370 int i;
2371
2372 par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
2373 for (i = 0; i < nregs; i++)
2374 {
2375 rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
2376 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
2377 GEN_INT (i * UNITS_PER_WORD));
2378 XVECEXP (par, 0, i) = tmp;
2379 }
2380 pcum->aapcs_reg = par;
2381 }
2382
2383 pcum->aapcs_nextncrn = ncrn + nregs;
2384 return;
2385 }
2386
2387 /* C.11 */
2388 pcum->aapcs_nextncrn = NUM_ARG_REGS;
2389
 2390 /* The argument is passed on the stack; record the needed number of words for
3abf17cf 2391 this argument and align the total size if necessary. */
43e9d192 2392on_stack:
3abf17cf 2393 pcum->aapcs_stack_words = size / UNITS_PER_WORD;
43e9d192 2394 if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
4f59f9f2
UB
2395 pcum->aapcs_stack_size = ROUND_UP (pcum->aapcs_stack_size,
2396 16 / UNITS_PER_WORD);
43e9d192
IB
2397 return;
2398}
2399
2400/* Implement TARGET_FUNCTION_ARG. */
2401
2402static rtx
ef4bddc2 2403aarch64_function_arg (cumulative_args_t pcum_v, machine_mode mode,
43e9d192
IB
2404 const_tree type, bool named)
2405{
2406 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
2407 gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
2408
2409 if (mode == VOIDmode)
2410 return NULL_RTX;
2411
2412 aarch64_layout_arg (pcum_v, mode, type, named);
2413 return pcum->aapcs_reg;
2414}
2415
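/* Initialize the cumulative argument state PCUM for a call and, when
 !TARGET_FLOAT, diagnose functions whose return type would need FP/SIMD
 registers. */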
2416void
2417aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
2418 const_tree fntype ATTRIBUTE_UNUSED,
2419 rtx libname ATTRIBUTE_UNUSED,
2420 const_tree fndecl ATTRIBUTE_UNUSED,
2421 unsigned n_named ATTRIBUTE_UNUSED)
2422{
2423 pcum->aapcs_ncrn = 0;
2424 pcum->aapcs_nvrn = 0;
2425 pcum->aapcs_nextncrn = 0;
2426 pcum->aapcs_nextnvrn = 0;
2427 pcum->pcs_variant = ARM_PCS_AAPCS64;
2428 pcum->aapcs_reg = NULL_RTX;
2429 pcum->aapcs_arg_processed = false;
2430 pcum->aapcs_stack_words = 0;
2431 pcum->aapcs_stack_size = 0;
2432
261fb553
AL
2433 if (!TARGET_FLOAT
2434 && fndecl && TREE_PUBLIC (fndecl)
2435 && fntype && fntype != error_mark_node)
2436 {
2437 const_tree type = TREE_TYPE (fntype);
2438 machine_mode mode ATTRIBUTE_UNUSED; /* To pass pointer as argument. */
2439 int nregs ATTRIBUTE_UNUSED; /* Likewise. */
2440 if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type), type,
2441 &mode, &nregs, NULL))
2442 aarch64_err_no_fpadvsimd (TYPE_MODE (type), "return type");
2443 }
43e9d192
IB
2444 return;
2445}
2446
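/* Implement TARGET_FUNCTION_ARG_ADVANCE. */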
2447static void
2448aarch64_function_arg_advance (cumulative_args_t pcum_v,
ef4bddc2 2449 machine_mode mode,
43e9d192
IB
2450 const_tree type,
2451 bool named)
2452{
2453 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
2454 if (pcum->pcs_variant == ARM_PCS_AAPCS64)
2455 {
2456 aarch64_layout_arg (pcum_v, mode, type, named);
2457 gcc_assert ((pcum->aapcs_reg != NULL_RTX)
2458 != (pcum->aapcs_stack_words != 0));
2459 pcum->aapcs_arg_processed = false;
2460 pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
2461 pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
2462 pcum->aapcs_stack_size += pcum->aapcs_stack_words;
2463 pcum->aapcs_stack_words = 0;
2464 pcum->aapcs_reg = NULL_RTX;
2465 }
2466}
2467
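/* Return true if REGNO is a general or FP/SIMD register used for passing
 arguments; this backs the FUNCTION_ARG_REGNO_P macro. */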
2468bool
2469aarch64_function_arg_regno_p (unsigned regno)
2470{
2471 return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
2472 || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
2473}
2474
2475/* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
2476 PARM_BOUNDARY bits of alignment, but will be given anything up
2477 to STACK_BOUNDARY bits if the type requires it. This makes sure
2478 that both before and after the layout of each argument, the Next
2479 Stacked Argument Address (NSAA) will have a minimum alignment of
2480 8 bytes. */
2481
2482static unsigned int
ef4bddc2 2483aarch64_function_arg_boundary (machine_mode mode, const_tree type)
43e9d192
IB
2484{
2485 unsigned int alignment = aarch64_function_arg_alignment (mode, type);
2486
2487 if (alignment < PARM_BOUNDARY)
2488 alignment = PARM_BOUNDARY;
2489 if (alignment > STACK_BOUNDARY)
2490 alignment = STACK_BOUNDARY;
2491 return alignment;
2492}
2493
2494/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
2495
2496 Return true if an argument passed on the stack should be padded upwards,
2497 i.e. if the least-significant byte of the stack slot has useful data.
2498
2499 Small aggregate types are placed in the lowest memory address.
2500
2501 The related parameter passing rules are B.4, C.3, C.5 and C.14. */
2502
2503bool
ef4bddc2 2504aarch64_pad_arg_upward (machine_mode mode, const_tree type)
43e9d192
IB
2505{
2506 /* On little-endian targets, the least significant byte of every stack
2507 argument is passed at the lowest byte address of the stack slot. */
2508 if (!BYTES_BIG_ENDIAN)
2509 return true;
2510
00edcfbe 2511 /* Otherwise, integral, floating-point and pointer types are padded downward:
43e9d192
IB
2512 the least significant byte of a stack argument is passed at the highest
2513 byte address of the stack slot. */
2514 if (type
00edcfbe
YZ
2515 ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)
2516 || POINTER_TYPE_P (type))
43e9d192
IB
2517 : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
2518 return false;
2519
2520 /* Everything else padded upward, i.e. data in first byte of stack slot. */
2521 return true;
2522}
2523
2524/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
2525
2526 It specifies padding for the last (may also be the only)
2527 element of a block move between registers and memory. If
2528 assuming the block is in the memory, padding upward means that
2529 the last element is padded after its highest significant byte,
2530 while in downward padding, the last element is padded at the
2531 its least significant byte side.
2532
2533 Small aggregates and small complex types are always padded
2534 upwards.
2535
2536 We don't need to worry about homogeneous floating-point or
2537 short-vector aggregates; their move is not affected by the
2538 padding direction determined here. Regardless of endianness,
2539 each element of such an aggregate is put in the least
2540 significant bits of a fp/simd register.
2541
2542 Return !BYTES_BIG_ENDIAN if the least significant byte of the
2543 register has useful data, and return the opposite if the most
2544 significant byte does. */
2545
2546bool
ef4bddc2 2547aarch64_pad_reg_upward (machine_mode mode, const_tree type,
43e9d192
IB
2548 bool first ATTRIBUTE_UNUSED)
2549{
2550
2551 /* Small composite types are always padded upward. */
2552 if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
2553 {
2554 HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
2555 : GET_MODE_SIZE (mode));
2556 if (size < 2 * UNITS_PER_WORD)
2557 return true;
2558 }
2559
2560 /* Otherwise, use the default padding. */
2561 return !BYTES_BIG_ENDIAN;
2562}
2563
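/* Implement TARGET_LIBGCC_CMP_RETURN_MODE. */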
ef4bddc2 2564static machine_mode
43e9d192
IB
2565aarch64_libgcc_cmp_return_mode (void)
2566{
2567 return SImode;
2568}
2569
a3eb8a52
EB
2570#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
2571
2572/* We use the 12-bit shifted immediate arithmetic instructions so values
2573 must be multiple of (1 << 12), i.e. 4096. */
2574#define ARITH_FACTOR 4096
2575
2576#if (PROBE_INTERVAL % ARITH_FACTOR) != 0
2577#error Cannot use simple address calculation for stack probing
2578#endif
2579
2580/* The pair of scratch registers used for stack probing. */
2581#define PROBE_STACK_FIRST_REG 9
2582#define PROBE_STACK_SECOND_REG 10
2583
2584/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
2585 inclusive. These are offsets from the current stack pointer. */
2586
2587static void
2588aarch64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
2589{
2590 rtx reg1 = gen_rtx_REG (ptr_mode, PROBE_STACK_FIRST_REG);
2591
2592 /* See the same assertion on PROBE_INTERVAL above. */
2593 gcc_assert ((first % ARITH_FACTOR) == 0);
2594
2595 /* See if we have a constant small number of probes to generate. If so,
2596 that's the easy case. */
2597 if (size <= PROBE_INTERVAL)
2598 {
2599 const HOST_WIDE_INT base = ROUND_UP (size, ARITH_FACTOR);
2600
2601 emit_set_insn (reg1,
2602 plus_constant (ptr_mode,
2603 stack_pointer_rtx, -(first + base)));
2604 emit_stack_probe (plus_constant (ptr_mode, reg1, base - size));
2605 }
2606
2607 /* The run-time loop is made up of 8 insns in the generic case while the
 2608 compile-time loop is made up of 4+2*(n-2) insns for n intervals. */
2609 else if (size <= 4 * PROBE_INTERVAL)
2610 {
2611 HOST_WIDE_INT i, rem;
2612
2613 emit_set_insn (reg1,
2614 plus_constant (ptr_mode,
2615 stack_pointer_rtx,
2616 -(first + PROBE_INTERVAL)));
2617 emit_stack_probe (reg1);
2618
2619 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
2620 it exceeds SIZE. If only two probes are needed, this will not
2621 generate any code. Then probe at FIRST + SIZE. */
2622 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
2623 {
2624 emit_set_insn (reg1,
2625 plus_constant (ptr_mode, reg1, -PROBE_INTERVAL));
2626 emit_stack_probe (reg1);
2627 }
2628
2629 rem = size - (i - PROBE_INTERVAL);
2630 if (rem > 256)
2631 {
2632 const HOST_WIDE_INT base = ROUND_UP (rem, ARITH_FACTOR);
2633
2634 emit_set_insn (reg1, plus_constant (ptr_mode, reg1, -base));
2635 emit_stack_probe (plus_constant (ptr_mode, reg1, base - rem));
2636 }
2637 else
2638 emit_stack_probe (plus_constant (ptr_mode, reg1, -rem));
2639 }
2640
2641 /* Otherwise, do the same as above, but in a loop. Note that we must be
2642 extra careful with variables wrapping around because we might be at
2643 the very top (or the very bottom) of the address space and we have
2644 to be able to handle this case properly; in particular, we use an
2645 equality test for the loop condition. */
2646 else
2647 {
2648 rtx reg2 = gen_rtx_REG (ptr_mode, PROBE_STACK_SECOND_REG);
2649
2650 /* Step 1: round SIZE to the previous multiple of the interval. */
2651
2652 HOST_WIDE_INT rounded_size = size & -PROBE_INTERVAL;
2653
2654
2655 /* Step 2: compute initial and final value of the loop counter. */
2656
2657 /* TEST_ADDR = SP + FIRST. */
2658 emit_set_insn (reg1,
2659 plus_constant (ptr_mode, stack_pointer_rtx, -first));
2660
2661 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
2662 emit_set_insn (reg2,
2663 plus_constant (ptr_mode, stack_pointer_rtx,
2664 -(first + rounded_size)));
2665
2666
2667 /* Step 3: the loop
2668
2669 do
2670 {
2671 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
2672 probe at TEST_ADDR
2673 }
2674 while (TEST_ADDR != LAST_ADDR)
2675
2676 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
2677 until it is equal to ROUNDED_SIZE. */
2678
2679 if (ptr_mode == DImode)
2680 emit_insn (gen_probe_stack_range_di (reg1, reg1, reg2));
2681 else
2682 emit_insn (gen_probe_stack_range_si (reg1, reg1, reg2));
2683
2684
2685 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
2686 that SIZE is equal to ROUNDED_SIZE. */
2687
2688 if (size != rounded_size)
2689 {
2690 HOST_WIDE_INT rem = size - rounded_size;
2691
2692 if (rem > 256)
2693 {
2694 const HOST_WIDE_INT base = ROUND_UP (rem, ARITH_FACTOR);
2695
2696 emit_set_insn (reg2, plus_constant (ptr_mode, reg2, -base));
2697 emit_stack_probe (plus_constant (ptr_mode, reg2, base - rem));
2698 }
2699 else
2700 emit_stack_probe (plus_constant (ptr_mode, reg2, -rem));
2701 }
2702 }
2703
2704 /* Make sure nothing is scheduled before we are done. */
2705 emit_insn (gen_blockage ());
2706}
2707
2708/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
2709 absolute addresses. */
2710
2711const char *
2712aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
2713{
2714 static int labelno = 0;
2715 char loop_lab[32];
2716 rtx xops[2];
2717
2718 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
2719
2720 /* Loop. */
2721 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
2722
2723 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
2724 xops[0] = reg1;
2725 xops[1] = GEN_INT (PROBE_INTERVAL);
2726 output_asm_insn ("sub\t%0, %0, %1", xops);
2727
2728 /* Probe at TEST_ADDR. */
2729 output_asm_insn ("str\txzr, [%0]", xops);
2730
2731 /* Test if TEST_ADDR == LAST_ADDR. */
2732 xops[1] = reg2;
2733 output_asm_insn ("cmp\t%0, %1", xops);
2734
2735 /* Branch. */
2736 fputs ("\tb.ne\t", asm_out_file);
2737 assemble_name_raw (asm_out_file, loop_lab);
2738 fputc ('\n', asm_out_file);
2739
2740 return "";
2741}
2742
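/* Implement TARGET_FRAME_POINTER_REQUIRED. */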
43e9d192
IB
2743static bool
2744aarch64_frame_pointer_required (void)
2745{
0b7f8166
MS
2746 /* In aarch64_override_options_after_change
2747 flag_omit_leaf_frame_pointer turns off the frame pointer by
2748 default. Turn it back on now if we've not got a leaf
2749 function. */
2750 if (flag_omit_leaf_frame_pointer
2751 && (!crtl->is_leaf || df_regs_ever_live_p (LR_REGNUM)))
2752 return true;
43e9d192 2753
0b7f8166 2754 return false;
43e9d192
IB
2755}
2756
2757/* Mark the registers that need to be saved by the callee and calculate
2758 the size of the callee-saved registers area and frame record (both FP
2759 and LR may be omitted). */
2760static void
2761aarch64_layout_frame (void)
2762{
2763 HOST_WIDE_INT offset = 0;
4b0685d9 2764 int regno, last_fp_reg = INVALID_REGNUM;
43e9d192
IB
2765
2766 if (reload_completed && cfun->machine->frame.laid_out)
2767 return;
2768
97826595
MS
2769#define SLOT_NOT_REQUIRED (-2)
2770#define SLOT_REQUIRED (-1)
2771
71bfb77a
WD
2772 cfun->machine->frame.wb_candidate1 = INVALID_REGNUM;
2773 cfun->machine->frame.wb_candidate2 = INVALID_REGNUM;
363ffa50 2774
43e9d192
IB
2775 /* First mark all the registers that really need to be saved... */
2776 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
97826595 2777 cfun->machine->frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
43e9d192
IB
2778
2779 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
97826595 2780 cfun->machine->frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
43e9d192
IB
2781
2782 /* ... that includes the eh data registers (if needed)... */
2783 if (crtl->calls_eh_return)
2784 for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
97826595
MS
2785 cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)]
2786 = SLOT_REQUIRED;
43e9d192
IB
2787
2788 /* ... and any callee saved register that dataflow says is live. */
2789 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
2790 if (df_regs_ever_live_p (regno)
1c923b60
JW
2791 && (regno == R30_REGNUM
2792 || !call_used_regs[regno]))
97826595 2793 cfun->machine->frame.reg_offset[regno] = SLOT_REQUIRED;
43e9d192
IB
2794
2795 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
2796 if (df_regs_ever_live_p (regno)
2797 && !call_used_regs[regno])
4b0685d9
WD
2798 {
2799 cfun->machine->frame.reg_offset[regno] = SLOT_REQUIRED;
2800 last_fp_reg = regno;
2801 }
43e9d192
IB
2802
2803 if (frame_pointer_needed)
2804 {
2e1cdae5 2805 /* FP and LR are placed in the linkage record. */
43e9d192 2806 cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
363ffa50 2807 cfun->machine->frame.wb_candidate1 = R29_REGNUM;
2e1cdae5 2808 cfun->machine->frame.reg_offset[R30_REGNUM] = UNITS_PER_WORD;
363ffa50 2809 cfun->machine->frame.wb_candidate2 = R30_REGNUM;
2e1cdae5 2810 offset += 2 * UNITS_PER_WORD;
43e9d192
IB
2811 }
2812
2813 /* Now assign stack slots for them. */
2e1cdae5 2814 for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
97826595 2815 if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
43e9d192
IB
2816 {
2817 cfun->machine->frame.reg_offset[regno] = offset;
71bfb77a 2818 if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM)
363ffa50 2819 cfun->machine->frame.wb_candidate1 = regno;
71bfb77a 2820 else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM)
363ffa50 2821 cfun->machine->frame.wb_candidate2 = regno;
43e9d192
IB
2822 offset += UNITS_PER_WORD;
2823 }
2824
4b0685d9
WD
2825 HOST_WIDE_INT max_int_offset = offset;
2826 offset = ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
2827 bool has_align_gap = offset != max_int_offset;
2828
43e9d192 2829 for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
97826595 2830 if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
43e9d192 2831 {
4b0685d9
WD
2832 /* If there is an alignment gap between integer and fp callee-saves,
2833 allocate the last fp register to it if possible. */
2834 if (regno == last_fp_reg && has_align_gap && (offset & 8) == 0)
2835 {
2836 cfun->machine->frame.reg_offset[regno] = max_int_offset;
2837 break;
2838 }
2839
43e9d192 2840 cfun->machine->frame.reg_offset[regno] = offset;
71bfb77a 2841 if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM)
363ffa50 2842 cfun->machine->frame.wb_candidate1 = regno;
71bfb77a 2843 else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM
363ffa50
JW
2844 && cfun->machine->frame.wb_candidate1 >= V0_REGNUM)
2845 cfun->machine->frame.wb_candidate2 = regno;
43e9d192
IB
2846 offset += UNITS_PER_WORD;
2847 }
2848
4f59f9f2 2849 offset = ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
43e9d192
IB
2850
2851 cfun->machine->frame.saved_regs_size = offset;
1c960e02 2852
71bfb77a
WD
2853 HOST_WIDE_INT varargs_and_saved_regs_size
2854 = offset + cfun->machine->frame.saved_varargs_size;
2855
1c960e02 2856 cfun->machine->frame.hard_fp_offset
71bfb77a 2857 = ROUND_UP (varargs_and_saved_regs_size + get_frame_size (),
4f59f9f2 2858 STACK_BOUNDARY / BITS_PER_UNIT);
1c960e02
MS
2859
2860 cfun->machine->frame.frame_size
4f59f9f2
UB
2861 = ROUND_UP (cfun->machine->frame.hard_fp_offset
2862 + crtl->outgoing_args_size,
2863 STACK_BOUNDARY / BITS_PER_UNIT);
1c960e02 2864
71bfb77a
WD
2865 cfun->machine->frame.locals_offset = cfun->machine->frame.saved_varargs_size;
2866
2867 cfun->machine->frame.initial_adjust = 0;
2868 cfun->machine->frame.final_adjust = 0;
2869 cfun->machine->frame.callee_adjust = 0;
2870 cfun->machine->frame.callee_offset = 0;
2871
2872 HOST_WIDE_INT max_push_offset = 0;
2873 if (cfun->machine->frame.wb_candidate2 != INVALID_REGNUM)
2874 max_push_offset = 512;
2875 else if (cfun->machine->frame.wb_candidate1 != INVALID_REGNUM)
2876 max_push_offset = 256;
2877
2878 if (cfun->machine->frame.frame_size < max_push_offset
2879 && crtl->outgoing_args_size == 0)
2880 {
2881 /* Simple, small frame with no outgoing arguments:
2882 stp reg1, reg2, [sp, -frame_size]!
2883 stp reg3, reg4, [sp, 16] */
2884 cfun->machine->frame.callee_adjust = cfun->machine->frame.frame_size;
2885 }
2886 else if ((crtl->outgoing_args_size
2887 + cfun->machine->frame.saved_regs_size < 512)
2888 && !(cfun->calls_alloca
2889 && cfun->machine->frame.hard_fp_offset < max_push_offset))
2890 {
2891 /* Frame with small outgoing arguments:
2892 sub sp, sp, frame_size
2893 stp reg1, reg2, [sp, outgoing_args_size]
2894 stp reg3, reg4, [sp, outgoing_args_size + 16] */
2895 cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
2896 cfun->machine->frame.callee_offset
2897 = cfun->machine->frame.frame_size - cfun->machine->frame.hard_fp_offset;
2898 }
2899 else if (cfun->machine->frame.hard_fp_offset < max_push_offset)
2900 {
2901 /* Frame with large outgoing arguments but a small local area:
2902 stp reg1, reg2, [sp, -hard_fp_offset]!
2903 stp reg3, reg4, [sp, 16]
2904 sub sp, sp, outgoing_args_size */
2905 cfun->machine->frame.callee_adjust = cfun->machine->frame.hard_fp_offset;
2906 cfun->machine->frame.final_adjust
2907 = cfun->machine->frame.frame_size - cfun->machine->frame.callee_adjust;
2908 }
2909 else if (!frame_pointer_needed
2910 && varargs_and_saved_regs_size < max_push_offset)
2911 {
2912 /* Frame with large local area and outgoing arguments (this pushes the
2913 callee-saves first, followed by the locals and outgoing area):
2914 stp reg1, reg2, [sp, -varargs_and_saved_regs_size]!
2915 stp reg3, reg4, [sp, 16]
2916 sub sp, sp, frame_size - varargs_and_saved_regs_size */
2917 cfun->machine->frame.callee_adjust = varargs_and_saved_regs_size;
2918 cfun->machine->frame.final_adjust
2919 = cfun->machine->frame.frame_size - cfun->machine->frame.callee_adjust;
2920 cfun->machine->frame.hard_fp_offset = cfun->machine->frame.callee_adjust;
2921 cfun->machine->frame.locals_offset = cfun->machine->frame.hard_fp_offset;
2922 }
2923 else
2924 {
2925 /* Frame with large local area and outgoing arguments using frame pointer:
2926 sub sp, sp, hard_fp_offset
2927 stp x29, x30, [sp, 0]
2928 add x29, sp, 0
2929 stp reg3, reg4, [sp, 16]
2930 sub sp, sp, outgoing_args_size */
2931 cfun->machine->frame.initial_adjust = cfun->machine->frame.hard_fp_offset;
2932 cfun->machine->frame.final_adjust
2933 = cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
2934 }
2935
43e9d192
IB
2936 cfun->machine->frame.laid_out = true;
2937}
2938
04ddfe06
KT
2939/* Return true if the register REGNO is saved on entry to
2940 the current function. */
2941
43e9d192
IB
2942static bool
2943aarch64_register_saved_on_entry (int regno)
2944{
97826595 2945 return cfun->machine->frame.reg_offset[regno] >= 0;
43e9d192
IB
2946}
2947
04ddfe06
KT
2948/* Return the next register, from REGNO up to LIMIT, that the callee
 2949 needs to save. */
2950
64dedd72
JW
2951static unsigned
2952aarch64_next_callee_save (unsigned regno, unsigned limit)
2953{
2954 while (regno <= limit && !aarch64_register_saved_on_entry (regno))
2955 regno ++;
2956 return regno;
2957}
43e9d192 2958
04ddfe06
KT
2959/* Push the register number REGNO of mode MODE to the stack with write-back
2960 adjusting the stack by ADJUSTMENT. */
2961
c5e1f66e 2962static void
ef4bddc2 2963aarch64_pushwb_single_reg (machine_mode mode, unsigned regno,
c5e1f66e
JW
2964 HOST_WIDE_INT adjustment)
2965 {
2966 rtx base_rtx = stack_pointer_rtx;
2967 rtx insn, reg, mem;
2968
2969 reg = gen_rtx_REG (mode, regno);
2970 mem = gen_rtx_PRE_MODIFY (Pmode, base_rtx,
2971 plus_constant (Pmode, base_rtx, -adjustment));
2972 mem = gen_rtx_MEM (mode, mem);
2973
2974 insn = emit_move_insn (mem, reg);
2975 RTX_FRAME_RELATED_P (insn) = 1;
2976}
2977
04ddfe06
KT
2978/* Generate and return an instruction to store the pair of registers
2979 REG and REG2 of mode MODE to location BASE with write-back adjusting
2980 the stack location BASE by ADJUSTMENT. */
2981
80c11907 2982static rtx
ef4bddc2 2983aarch64_gen_storewb_pair (machine_mode mode, rtx base, rtx reg, rtx reg2,
80c11907
JW
2984 HOST_WIDE_INT adjustment)
2985{
2986 switch (mode)
2987 {
2988 case DImode:
2989 return gen_storewb_pairdi_di (base, base, reg, reg2,
2990 GEN_INT (-adjustment),
2991 GEN_INT (UNITS_PER_WORD - adjustment));
2992 case DFmode:
2993 return gen_storewb_pairdf_di (base, base, reg, reg2,
2994 GEN_INT (-adjustment),
2995 GEN_INT (UNITS_PER_WORD - adjustment));
2996 default:
2997 gcc_unreachable ();
2998 }
2999}
3000
04ddfe06
KT
3001/* Push registers numbered REGNO1 and REGNO2 to the stack, adjusting the
3002 stack pointer by ADJUSTMENT. */
3003
80c11907 3004static void
89ac681e 3005aarch64_push_regs (unsigned regno1, unsigned regno2, HOST_WIDE_INT adjustment)
80c11907 3006{
5d8a22a5 3007 rtx_insn *insn;
89ac681e
WD
3008 machine_mode mode = (regno1 <= R30_REGNUM) ? DImode : DFmode;
3009
71bfb77a 3010 if (regno2 == INVALID_REGNUM)
89ac681e
WD
3011 return aarch64_pushwb_single_reg (mode, regno1, adjustment);
3012
80c11907
JW
3013 rtx reg1 = gen_rtx_REG (mode, regno1);
3014 rtx reg2 = gen_rtx_REG (mode, regno2);
3015
3016 insn = emit_insn (aarch64_gen_storewb_pair (mode, stack_pointer_rtx, reg1,
3017 reg2, adjustment));
3018 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
80c11907
JW
3019 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3020 RTX_FRAME_RELATED_P (insn) = 1;
3021}
3022
04ddfe06
KT
3023/* Load the pair of registers REG, REG2 of mode MODE from stack location BASE,
 3024 adjusting BASE by ADJUSTMENT afterwards. */
3025
159313d9 3026static rtx
ef4bddc2 3027aarch64_gen_loadwb_pair (machine_mode mode, rtx base, rtx reg, rtx reg2,
159313d9
JW
3028 HOST_WIDE_INT adjustment)
3029{
3030 switch (mode)
3031 {
3032 case DImode:
3033 return gen_loadwb_pairdi_di (base, base, reg, reg2, GEN_INT (adjustment),
3e322b3f 3034 GEN_INT (UNITS_PER_WORD));
159313d9
JW
3035 case DFmode:
3036 return gen_loadwb_pairdf_di (base, base, reg, reg2, GEN_INT (adjustment),
3e322b3f 3037 GEN_INT (UNITS_PER_WORD));
159313d9
JW
3038 default:
3039 gcc_unreachable ();
3040 }
3041}
3042
04ddfe06
KT
3043/* Pop the two registers numbered REGNO1, REGNO2 from the stack, adjusting it
3044 afterwards by ADJUSTMENT and writing the appropriate REG_CFA_RESTORE notes
3045 into CFI_OPS. */
3046
89ac681e
WD
3047static void
3048aarch64_pop_regs (unsigned regno1, unsigned regno2, HOST_WIDE_INT adjustment,
3049 rtx *cfi_ops)
3050{
3051 machine_mode mode = (regno1 <= R30_REGNUM) ? DImode : DFmode;
3052 rtx reg1 = gen_rtx_REG (mode, regno1);
3053
3054 *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg1, *cfi_ops);
3055
71bfb77a 3056 if (regno2 == INVALID_REGNUM)
89ac681e
WD
3057 {
3058 rtx mem = plus_constant (Pmode, stack_pointer_rtx, adjustment);
3059 mem = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx, mem);
3060 emit_move_insn (reg1, gen_rtx_MEM (mode, mem));
3061 }
3062 else
3063 {
3064 rtx reg2 = gen_rtx_REG (mode, regno2);
3065 *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
3066 emit_insn (aarch64_gen_loadwb_pair (mode, stack_pointer_rtx, reg1,
3067 reg2, adjustment));
3068 }
3069}
3070
04ddfe06
KT
3071/* Generate and return a store pair instruction of mode MODE to store
3072 register REG1 to MEM1 and register REG2 to MEM2. */
3073
72df5c1f 3074static rtx
ef4bddc2 3075aarch64_gen_store_pair (machine_mode mode, rtx mem1, rtx reg1, rtx mem2,
72df5c1f
JW
3076 rtx reg2)
3077{
3078 switch (mode)
3079 {
3080 case DImode:
3081 return gen_store_pairdi (mem1, reg1, mem2, reg2);
3082
3083 case DFmode:
3084 return gen_store_pairdf (mem1, reg1, mem2, reg2);
3085
3086 default:
3087 gcc_unreachable ();
3088 }
3089}
3090
04ddfe06
KT
3091/* Generate and return a load pair instruction of mode MODE to load register
 3092 REG1 from MEM1 and register REG2 from MEM2. */
3093
72df5c1f 3094static rtx
ef4bddc2 3095aarch64_gen_load_pair (machine_mode mode, rtx reg1, rtx mem1, rtx reg2,
72df5c1f
JW
3096 rtx mem2)
3097{
3098 switch (mode)
3099 {
3100 case DImode:
3101 return gen_load_pairdi (reg1, mem1, reg2, mem2);
3102
3103 case DFmode:
3104 return gen_load_pairdf (reg1, mem1, reg2, mem2);
3105
3106 default:
3107 gcc_unreachable ();
3108 }
3109}
3110
04ddfe06
KT
3111/* Emit code to save the callee-saved registers from register number START
3112 to LIMIT to the stack at the location starting at offset START_OFFSET,
3113 skipping any write-back candidates if SKIP_WB is true. */
43e9d192 3114
43e9d192 3115static void
ef4bddc2 3116aarch64_save_callee_saves (machine_mode mode, HOST_WIDE_INT start_offset,
ae13fce3 3117 unsigned start, unsigned limit, bool skip_wb)
43e9d192 3118{
5d8a22a5 3119 rtx_insn *insn;
ef4bddc2 3120 rtx (*gen_mem_ref) (machine_mode, rtx) = (frame_pointer_needed
a007a21c 3121 ? gen_frame_mem : gen_rtx_MEM);
43e9d192
IB
3122 unsigned regno;
3123 unsigned regno2;
3124
0ec74a1e 3125 for (regno = aarch64_next_callee_save (start, limit);
64dedd72
JW
3126 regno <= limit;
3127 regno = aarch64_next_callee_save (regno + 1, limit))
43e9d192 3128 {
ae13fce3
JW
3129 rtx reg, mem;
3130 HOST_WIDE_INT offset;
64dedd72 3131
ae13fce3
JW
3132 if (skip_wb
3133 && (regno == cfun->machine->frame.wb_candidate1
3134 || regno == cfun->machine->frame.wb_candidate2))
3135 continue;
3136
3137 reg = gen_rtx_REG (mode, regno);
3138 offset = start_offset + cfun->machine->frame.reg_offset[regno];
0ec74a1e
JW
3139 mem = gen_mem_ref (mode, plus_constant (Pmode, stack_pointer_rtx,
3140 offset));
64dedd72
JW
3141
3142 regno2 = aarch64_next_callee_save (regno + 1, limit);
3143
3144 if (regno2 <= limit
3145 && ((cfun->machine->frame.reg_offset[regno] + UNITS_PER_WORD)
3146 == cfun->machine->frame.reg_offset[regno2]))
3147
43e9d192 3148 {
0ec74a1e 3149 rtx reg2 = gen_rtx_REG (mode, regno2);
64dedd72
JW
3150 rtx mem2;
3151
3152 offset = start_offset + cfun->machine->frame.reg_offset[regno2];
8ed2fc62
JW
3153 mem2 = gen_mem_ref (mode, plus_constant (Pmode, stack_pointer_rtx,
3154 offset));
3155 insn = emit_insn (aarch64_gen_store_pair (mode, mem, reg, mem2,
3156 reg2));
0b4a9743 3157
64dedd72
JW
3158 /* The first part of a frame-related parallel insn is
3159 always assumed to be relevant to the frame
 3160 calculations; subsequent parts are only
3161 frame-related if explicitly marked. */
3162 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3163 regno = regno2;
3164 }
3165 else
8ed2fc62
JW
3166 insn = emit_move_insn (mem, reg);
3167
3168 RTX_FRAME_RELATED_P (insn) = 1;
3169 }
3170}
3171
04ddfe06
KT
3172/* Emit code to restore the callee registers of mode MODE from register
3173 number START up to and including LIMIT. Restore from the stack offset
3174 START_OFFSET, skipping any write-back candidates if SKIP_WB is true.
3175 Write the appropriate REG_CFA_RESTORE notes into CFI_OPS. */
3176
8ed2fc62 3177static void
ef4bddc2 3178aarch64_restore_callee_saves (machine_mode mode,
8ed2fc62 3179 HOST_WIDE_INT start_offset, unsigned start,
dd991abb 3180 unsigned limit, bool skip_wb, rtx *cfi_ops)
8ed2fc62 3181{
8ed2fc62 3182 rtx base_rtx = stack_pointer_rtx;
ef4bddc2 3183 rtx (*gen_mem_ref) (machine_mode, rtx) = (frame_pointer_needed
8ed2fc62
JW
3184 ? gen_frame_mem : gen_rtx_MEM);
3185 unsigned regno;
3186 unsigned regno2;
3187 HOST_WIDE_INT offset;
3188
3189 for (regno = aarch64_next_callee_save (start, limit);
3190 regno <= limit;
3191 regno = aarch64_next_callee_save (regno + 1, limit))
3192 {
ae13fce3 3193 rtx reg, mem;
8ed2fc62 3194
ae13fce3
JW
3195 if (skip_wb
3196 && (regno == cfun->machine->frame.wb_candidate1
3197 || regno == cfun->machine->frame.wb_candidate2))
3198 continue;
3199
3200 reg = gen_rtx_REG (mode, regno);
8ed2fc62
JW
3201 offset = start_offset + cfun->machine->frame.reg_offset[regno];
3202 mem = gen_mem_ref (mode, plus_constant (Pmode, base_rtx, offset));
3203
3204 regno2 = aarch64_next_callee_save (regno + 1, limit);
3205
3206 if (regno2 <= limit
3207 && ((cfun->machine->frame.reg_offset[regno] + UNITS_PER_WORD)
3208 == cfun->machine->frame.reg_offset[regno2]))
64dedd72 3209 {
8ed2fc62
JW
3210 rtx reg2 = gen_rtx_REG (mode, regno2);
3211 rtx mem2;
3212
3213 offset = start_offset + cfun->machine->frame.reg_offset[regno2];
3214 mem2 = gen_mem_ref (mode, plus_constant (Pmode, base_rtx, offset));
dd991abb 3215 emit_insn (aarch64_gen_load_pair (mode, reg, mem, reg2, mem2));
8ed2fc62 3216
dd991abb 3217 *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
8ed2fc62 3218 regno = regno2;
43e9d192 3219 }
8ed2fc62 3220 else
dd991abb
RH
3221 emit_move_insn (reg, mem);
3222 *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg, *cfi_ops);
43e9d192 3223 }
43e9d192
IB
3224}
3225
3226/* AArch64 stack frames generated by this compiler look like:
3227
3228 +-------------------------------+
3229 | |
3230 | incoming stack arguments |
3231 | |
34834420
MS
3232 +-------------------------------+
3233 | | <-- incoming stack pointer (aligned)
43e9d192
IB
3234 | callee-allocated save area |
3235 | for register varargs |
3236 | |
34834420
MS
3237 +-------------------------------+
3238 | local variables | <-- frame_pointer_rtx
43e9d192
IB
3239 | |
3240 +-------------------------------+
454fdba9
RL
3241 | padding0 | \
3242 +-------------------------------+ |
454fdba9 3243 | callee-saved registers | | frame.saved_regs_size
454fdba9
RL
3244 +-------------------------------+ |
3245 | LR' | |
3246 +-------------------------------+ |
34834420
MS
3247 | FP' | / <- hard_frame_pointer_rtx (aligned)
3248 +-------------------------------+
43e9d192
IB
3249 | dynamic allocation |
3250 +-------------------------------+
34834420
MS
3251 | padding |
3252 +-------------------------------+
3253 | outgoing stack arguments | <-- arg_pointer
3254 | |
3255 +-------------------------------+
3256 | | <-- stack_pointer_rtx (aligned)
43e9d192 3257
34834420
MS
3258 Dynamic stack allocations via alloca() decrease stack_pointer_rtx
3259 but leave frame_pointer_rtx and hard_frame_pointer_rtx
3260 unchanged. */
43e9d192
IB
3261
3262/* Generate the prologue instructions for entry into a function.
3263 Establish the stack frame by decreasing the stack pointer with a
3264 properly calculated size and, if necessary, create a frame record
3265 filled with the values of LR and previous frame pointer. The
6991c977 3266 current FP is also set up if it is in use. */
43e9d192
IB
3267
3268void
3269aarch64_expand_prologue (void)
3270{
43e9d192 3271 aarch64_layout_frame ();
43e9d192 3272
71bfb77a
WD
3273 HOST_WIDE_INT frame_size = cfun->machine->frame.frame_size;
3274 HOST_WIDE_INT initial_adjust = cfun->machine->frame.initial_adjust;
3275 HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
3276 HOST_WIDE_INT final_adjust = cfun->machine->frame.final_adjust;
3277 HOST_WIDE_INT callee_offset = cfun->machine->frame.callee_offset;
3278 unsigned reg1 = cfun->machine->frame.wb_candidate1;
3279 unsigned reg2 = cfun->machine->frame.wb_candidate2;
3280 rtx_insn *insn;
43e9d192 3281
dd991abb
RH
3282 if (flag_stack_usage_info)
3283 current_function_static_stack_size = frame_size;
43e9d192 3284
a3eb8a52
EB
3285 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3286 {
3287 if (crtl->is_leaf && !cfun->calls_alloca)
3288 {
3289 if (frame_size > PROBE_INTERVAL && frame_size > STACK_CHECK_PROTECT)
3290 aarch64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3291 frame_size - STACK_CHECK_PROTECT);
3292 }
3293 else if (frame_size > 0)
3294 aarch64_emit_probe_stack_range (STACK_CHECK_PROTECT, frame_size);
3295 }
3296
5be6b295 3297 aarch64_sub_sp (IP0_REGNUM, initial_adjust, true);
43e9d192 3298
71bfb77a
WD
3299 if (callee_adjust != 0)
3300 aarch64_push_regs (reg1, reg2, callee_adjust);
43e9d192 3301
71bfb77a 3302 if (frame_pointer_needed)
43e9d192 3303 {
71bfb77a
WD
3304 if (callee_adjust == 0)
3305 aarch64_save_callee_saves (DImode, callee_offset, R29_REGNUM,
3306 R30_REGNUM, false);
3307 insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
3308 stack_pointer_rtx,
3309 GEN_INT (callee_offset)));
3310 RTX_FRAME_RELATED_P (insn) = 1;
3311 emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
43e9d192 3312 }
71bfb77a
WD
3313
3314 aarch64_save_callee_saves (DImode, callee_offset, R0_REGNUM, R30_REGNUM,
3315 callee_adjust != 0 || frame_pointer_needed);
3316 aarch64_save_callee_saves (DFmode, callee_offset, V0_REGNUM, V31_REGNUM,
3317 callee_adjust != 0 || frame_pointer_needed);
5be6b295 3318 aarch64_sub_sp (IP1_REGNUM, final_adjust, !frame_pointer_needed);
43e9d192
IB
3319}
3320
4f942779
RL
3321/* Return TRUE if we can use a simple_return insn.
3322
 3323 This function checks whether the callee-saved stack is empty, which
 3324 means no restore actions are needed. The pro_and_epilogue pass uses
 3325 this to check whether the shrink-wrapping optimization is feasible. */
3326
3327bool
3328aarch64_use_return_insn_p (void)
3329{
3330 if (!reload_completed)
3331 return false;
3332
3333 if (crtl->profile)
3334 return false;
3335
3336 aarch64_layout_frame ();
3337
3338 return cfun->machine->frame.frame_size == 0;
3339}
3340
71bfb77a
WD
3341/* Generate the epilogue instructions for returning from a function.
3342 This is almost exactly the reverse of the prolog sequence, except
3343 that we need to insert barriers to avoid scheduling loads that read
3344 from a deallocated stack, and we optimize the unwind records by
3345 emitting them all together if possible. */
43e9d192
IB
3346void
3347aarch64_expand_epilogue (bool for_sibcall)
3348{
43e9d192 3349 aarch64_layout_frame ();
43e9d192 3350
71bfb77a
WD
3351 HOST_WIDE_INT initial_adjust = cfun->machine->frame.initial_adjust;
3352 HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
3353 HOST_WIDE_INT final_adjust = cfun->machine->frame.final_adjust;
3354 HOST_WIDE_INT callee_offset = cfun->machine->frame.callee_offset;
3355 unsigned reg1 = cfun->machine->frame.wb_candidate1;
3356 unsigned reg2 = cfun->machine->frame.wb_candidate2;
3357 rtx cfi_ops = NULL;
3358 rtx_insn *insn;
44c0e7b9 3359
71bfb77a
WD
3360 /* We need to add memory barrier to prevent read from deallocated stack. */
3361 bool need_barrier_p = (get_frame_size ()
3362 + cfun->machine->frame.saved_varargs_size) != 0;
43e9d192 3363
71bfb77a
WD
3364 /* Emit a barrier to prevent loads from a deallocated stack. */
3365 if (final_adjust > crtl->outgoing_args_size || cfun->calls_alloca)
43e9d192 3366 {
71bfb77a
WD
3367 emit_insn (gen_stack_tie (stack_pointer_rtx, stack_pointer_rtx));
3368 need_barrier_p = false;
3369 }
7e8c2bd5 3370
71bfb77a
WD
3371 /* Restore the stack pointer from the frame pointer if it may not
3372 be the same as the stack pointer. */
3373 if (frame_pointer_needed && (final_adjust || cfun->calls_alloca))
3374 {
43e9d192
IB
3375 insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
3376 hard_frame_pointer_rtx,
71bfb77a
WD
3377 GEN_INT (-callee_offset)));
3378 /* If writeback is used when restoring callee-saves, the CFA
3379 is restored on the instruction doing the writeback. */
3380 RTX_FRAME_RELATED_P (insn) = callee_adjust == 0;
43e9d192 3381 }
71bfb77a 3382 else
5be6b295 3383 aarch64_add_sp (IP1_REGNUM, final_adjust, df_regs_ever_live_p (IP1_REGNUM));
43e9d192 3384
71bfb77a
WD
3385 aarch64_restore_callee_saves (DImode, callee_offset, R0_REGNUM, R30_REGNUM,
3386 callee_adjust != 0, &cfi_ops);
3387 aarch64_restore_callee_saves (DFmode, callee_offset, V0_REGNUM, V31_REGNUM,
3388 callee_adjust != 0, &cfi_ops);
43e9d192 3389
71bfb77a
WD
3390 if (need_barrier_p)
3391 emit_insn (gen_stack_tie (stack_pointer_rtx, stack_pointer_rtx));
3392
3393 if (callee_adjust != 0)
3394 aarch64_pop_regs (reg1, reg2, callee_adjust, &cfi_ops);
3395
3396 if (callee_adjust != 0 || initial_adjust > 65536)
3397 {
3398 /* Emit delayed restores and set the CFA to be SP + initial_adjust. */
89ac681e 3399 insn = get_last_insn ();
71bfb77a
WD
3400 rtx new_cfa = plus_constant (Pmode, stack_pointer_rtx, initial_adjust);
3401 REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA, new_cfa, cfi_ops);
43e9d192 3402 RTX_FRAME_RELATED_P (insn) = 1;
71bfb77a 3403 cfi_ops = NULL;
43e9d192
IB
3404 }
3405
5be6b295 3406 aarch64_add_sp (IP0_REGNUM, initial_adjust, df_regs_ever_live_p (IP0_REGNUM));
7e8c2bd5 3407
71bfb77a
WD
3408 if (cfi_ops)
3409 {
3410 /* Emit delayed restores and reset the CFA to be SP. */
3411 insn = get_last_insn ();
3412 cfi_ops = alloc_reg_note (REG_CFA_DEF_CFA, stack_pointer_rtx, cfi_ops);
3413 REG_NOTES (insn) = cfi_ops;
3414 RTX_FRAME_RELATED_P (insn) = 1;
dd991abb
RH
3415 }
3416
3417 /* Stack adjustment for exception handler. */
3418 if (crtl->calls_eh_return)
3419 {
3420 /* We need to unwind the stack by the offset computed by
3421 EH_RETURN_STACKADJ_RTX. We have already reset the CFA
3422 to be SP; letting the CFA move during this adjustment
3423 is just as correct as retaining the CFA from the body
3424 of the function. Therefore, do nothing special. */
3425 emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
43e9d192
IB
3426 }
3427
3428 emit_use (gen_rtx_REG (DImode, LR_REGNUM));
3429 if (!for_sibcall)
3430 emit_jump_insn (ret_rtx);
3431}
3432
3433/* Return the place to copy the exception unwinding return address to.
3434 This will probably be a stack slot, but could (in theory) be the
3435 return register. */
3436rtx
3437aarch64_final_eh_return_addr (void)
3438{
1c960e02
MS
3439 HOST_WIDE_INT fp_offset;
3440
43e9d192 3441 aarch64_layout_frame ();
1c960e02
MS
3442
3443 fp_offset = cfun->machine->frame.frame_size
3444 - cfun->machine->frame.hard_fp_offset;
43e9d192
IB
3445
3446 if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
3447 return gen_rtx_REG (DImode, LR_REGNUM);
3448
3449 /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
3450 result in a store to save LR introduced by builtin_eh_return () being
3451 incorrectly deleted because the alias is not detected.
3452 So in the calculation of the address to copy the exception unwinding
3453 return address to, we note 2 cases.
3454 If FP is needed and the fp_offset is 0, it means that SP = FP and hence
3455 we return a SP-relative location since all the addresses are SP-relative
3456 in this case. This prevents the store from being optimized away.
3457 If the fp_offset is not 0, then the addresses will be FP-relative and
3458 therefore we return a FP-relative location. */
3459
3460 if (frame_pointer_needed)
3461 {
3462 if (fp_offset)
3463 return gen_frame_mem (DImode,
3464 plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
3465 else
3466 return gen_frame_mem (DImode,
3467 plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
3468 }
3469
3470 /* If FP is not needed, we calculate the location of LR, which would be
3471 at the top of the saved registers block. */
3472
3473 return gen_frame_mem (DImode,
3474 plus_constant (Pmode,
3475 stack_pointer_rtx,
3476 fp_offset
3477 + cfun->machine->frame.saved_regs_size
3478 - 2 * UNITS_PER_WORD));
3479}
3480
43e9d192
IB
3481/* Output code to add DELTA to the first argument, and then jump
3482 to FUNCTION. Used for C++ multiple inheritance. */
3483static void
3484aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
3485 HOST_WIDE_INT delta,
3486 HOST_WIDE_INT vcall_offset,
3487 tree function)
3488{
3489 /* The this pointer is always in x0. Note that this differs from
3490 Arm where the this pointer may be bumped to r1 if r0 is required
3491 to return a pointer to an aggregate. On AArch64 a result value
3492 pointer will be in x8. */
3493 int this_regno = R0_REGNUM;
5d8a22a5
DM
3494 rtx this_rtx, temp0, temp1, addr, funexp;
3495 rtx_insn *insn;
43e9d192 3496
75f1d6fc
SN
3497 reload_completed = 1;
3498 emit_note (NOTE_INSN_PROLOGUE_END);
43e9d192
IB
3499
3500 if (vcall_offset == 0)
5be6b295 3501 aarch64_add_constant (Pmode, this_regno, IP1_REGNUM, delta);
43e9d192
IB
3502 else
3503 {
28514dda 3504 gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
43e9d192 3505
75f1d6fc
SN
3506 this_rtx = gen_rtx_REG (Pmode, this_regno);
3507 temp0 = gen_rtx_REG (Pmode, IP0_REGNUM);
3508 temp1 = gen_rtx_REG (Pmode, IP1_REGNUM);
43e9d192 3509
75f1d6fc
SN
3510 addr = this_rtx;
3511 if (delta != 0)
3512 {
3513 if (delta >= -256 && delta < 256)
3514 addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx,
3515 plus_constant (Pmode, this_rtx, delta));
3516 else
5be6b295 3517 aarch64_add_constant (Pmode, this_regno, IP1_REGNUM, delta);
43e9d192
IB
3518 }
3519
28514dda
YZ
3520 if (Pmode == ptr_mode)
3521 aarch64_emit_move (temp0, gen_rtx_MEM (ptr_mode, addr));
3522 else
3523 aarch64_emit_move (temp0,
3524 gen_rtx_ZERO_EXTEND (Pmode,
3525 gen_rtx_MEM (ptr_mode, addr)));
75f1d6fc 3526
28514dda 3527 if (vcall_offset >= -256 && vcall_offset < 4096 * POINTER_BYTES)
75f1d6fc 3528 addr = plus_constant (Pmode, temp0, vcall_offset);
43e9d192
IB
3529 else
3530 {
f43657b4
JW
3531 aarch64_internal_mov_immediate (temp1, GEN_INT (vcall_offset), true,
3532 Pmode);
75f1d6fc 3533 addr = gen_rtx_PLUS (Pmode, temp0, temp1);
43e9d192
IB
3534 }
3535
28514dda
YZ
3536 if (Pmode == ptr_mode)
3537 aarch64_emit_move (temp1, gen_rtx_MEM (ptr_mode,addr));
3538 else
3539 aarch64_emit_move (temp1,
3540 gen_rtx_SIGN_EXTEND (Pmode,
3541 gen_rtx_MEM (ptr_mode, addr)));
3542
75f1d6fc 3543 emit_insn (gen_add2_insn (this_rtx, temp1));
43e9d192
IB
3544 }
3545
75f1d6fc
SN
3546 /* Generate a tail call to the target function. */
3547 if (!TREE_USED (function))
3548 {
3549 assemble_external (function);
3550 TREE_USED (function) = 1;
3551 }
3552 funexp = XEXP (DECL_RTL (function), 0);
3553 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
3554 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
3555 SIBLING_CALL_P (insn) = 1;
3556
3557 insn = get_insns ();
3558 shorten_branches (insn);
3559 final_start_function (insn, file, 1);
3560 final (insn, file, 1);
43e9d192 3561 final_end_function ();
75f1d6fc
SN
3562
3563 /* Stop pretending to be a post-reload pass. */
3564 reload_completed = 0;
43e9d192
IB
3565}
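/* As a rough illustration, with DELTA == 8 and VCALL_OFFSET == 0 the thunk
   emitted by the code above reduces to

	add	x0, x0, 8
	b	<function>

   i.e. the this pointer is adjusted in place and control tail-calls the
   real target.  */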
3566
43e9d192
IB
3567static bool
3568aarch64_tls_referenced_p (rtx x)
3569{
3570 if (!TARGET_HAVE_TLS)
3571 return false;
e7de8563
RS
3572 subrtx_iterator::array_type array;
3573 FOR_EACH_SUBRTX (iter, array, x, ALL)
3574 {
3575 const_rtx x = *iter;
3576 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
3577 return true;
3578 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
3579 TLS offsets, not real symbol references. */
3580 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
3581 iter.skip_subrtxes ();
3582 }
3583 return false;
43e9d192
IB
3584}
3585
3586
43e9d192
IB
3587/* Return true if val can be encoded as a 12-bit unsigned immediate with
3588 a left shift of 0 or 12 bits. */
3589bool
3590aarch64_uimm12_shift (HOST_WIDE_INT val)
3591{
3592 return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
3593 || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
3594 );
3595}
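/* For illustration, these are the immediates accepted by ADD/SUB with a
   12-bit unsigned immediate, optionally shifted left by 12:

     0xabc       true   (fits in bits [11:0])
     0xabc000    true   (fits in bits [23:12])
     0x1001      false  (spans both fields)
     0x1000000   false  (bit 24 lies outside either field).  */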
3596
3597
3598/* Return true if val is an immediate that can be loaded into a
3599 register by a MOVZ instruction. */
3600static bool
ef4bddc2 3601aarch64_movw_imm (HOST_WIDE_INT val, machine_mode mode)
43e9d192
IB
3602{
3603 if (GET_MODE_SIZE (mode) > 4)
3604 {
3605 if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
3606 || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
3607 return 1;
3608 }
3609 else
3610 {
3611 /* Ignore sign extension. */
3612 val &= (HOST_WIDE_INT) 0xffffffff;
3613 }
3614 return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
3615 || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
3616}
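/* For example, in DImode:

     0xabcd           true   (one 16-bit chunk, at bits [15:0])
     0xabcd0000       true   (chunk at bits [31:16])
     0xabcd00000000   true   (chunk at bits [47:32])
     0x10001          false  (two non-zero 16-bit chunks).

   For SImode the value is first truncated to 32 bits, so only the low
   two chunk positions are considered.  */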
3617
a64c73a2
WD
3618/* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2. */
3619
3620static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
3621 {
3622 0x0000000100000001ull,
3623 0x0001000100010001ull,
3624 0x0101010101010101ull,
3625 0x1111111111111111ull,
3626 0x5555555555555555ull,
3627 };
3628
43e9d192
IB
3629
3630/* Return true if val is a valid bitmask immediate. */
a64c73a2 3631
43e9d192 3632bool
a64c73a2 3633aarch64_bitmask_imm (HOST_WIDE_INT val_in, machine_mode mode)
43e9d192 3634{
a64c73a2
WD
3635 unsigned HOST_WIDE_INT val, tmp, mask, first_one, next_one;
3636 int bits;
3637
3638 /* Check for a single sequence of one bits and return quickly if so.
3639 The special cases of all ones and all zeroes return false. */
3640 val = (unsigned HOST_WIDE_INT) val_in;
3641 tmp = val + (val & -val);
3642
3643 if (tmp == (tmp & -tmp))
3644 return (val + 1) > 1;
3645
3646 /* Replicate 32-bit immediates so we can treat them as 64-bit. */
3647 if (mode == SImode)
3648 val = (val << 32) | (val & 0xffffffff);
3649
3650 /* Invert if the immediate doesn't start with a zero bit - this means we
3651 only need to search for sequences of one bits. */
3652 if (val & 1)
3653 val = ~val;
3654
3655 /* Find the first set bit and set tmp to val with the first sequence of one
3656 bits removed. Return success if there is a single sequence of ones. */
3657 first_one = val & -val;
3658 tmp = val & (val + first_one);
3659
3660 if (tmp == 0)
3661 return true;
3662
3663 /* Find the next set bit and compute the difference in bit position. */
3664 next_one = tmp & -tmp;
3665 bits = clz_hwi (first_one) - clz_hwi (next_one);
3666 mask = val ^ tmp;
3667
3668 /* Check the bit position difference is a power of 2, and that the first
3669 sequence of one bits fits within 'bits' bits. */
3670 if ((mask >> bits) != 0 || bits != (bits & -bits))
3671 return false;
3672
3673 /* Check the sequence of one bits is repeated 64/bits times. */
3674 return val == mask * bitmask_imm_mul[__builtin_clz (bits) - 26];
43e9d192
IB
3675}
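/* Worked example for the function above, with val_in = 0x00ff00ff00ff00ff
   in DImode.  The value is not a single run of ones, so the quick check
   fails.  It starts with a one bit, so it is inverted to 0xff00ff00ff00ff00.
   Then first_one is bit 8 and next_one is bit 24, giving bits = 16 (a power
   of two), and mask = 0xff00 (the first run of ones) fits within 16 bits.
   Finally 0xff00 * 0x0001000100010001 == 0xff00ff00ff00ff00, so the pattern
   repeats exactly 64/16 times and the immediate is accepted.  */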
3676
3677
3678/* Return true if val is an immediate that can be loaded into a
3679 register in a single instruction. */
3680bool
ef4bddc2 3681aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
43e9d192
IB
3682{
3683 if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
3684 return 1;
3685 return aarch64_bitmask_imm (val, mode);
3686}
3687
3688static bool
ef4bddc2 3689aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
43e9d192
IB
3690{
3691 rtx base, offset;
7eda14e1 3692
43e9d192
IB
3693 if (GET_CODE (x) == HIGH)
3694 return true;
3695
3696 split_const (x, &base, &offset);
3697 if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
28514dda 3698 {
a6e0bfa7 3699 if (aarch64_classify_symbol (base, offset)
28514dda
YZ
3700 != SYMBOL_FORCE_TO_MEM)
3701 return true;
3702 else
3703 /* Avoid generating a 64-bit relocation in ILP32; leave it
3704 to aarch64_expand_mov_immediate to handle properly. */
3705 return mode != ptr_mode;
3706 }
43e9d192
IB
3707
3708 return aarch64_tls_referenced_p (x);
3709}
3710
e79136e4
WD
3711/* Implement TARGET_CASE_VALUES_THRESHOLD.
3712 The expansion for a table switch is quite expensive due to the number
3713 of instructions, the table lookup and the hard-to-predict indirect jump.
3714 When optimizing for speed with -O3 enabled, use the per-core tuning if
3715 set, otherwise use tables for > 16 cases as a tradeoff between size and
3716 performance. When optimizing for size, use the default setting. */
50487d79
EM
3717
3718static unsigned int
3719aarch64_case_values_threshold (void)
3720{
3721 /* Use the specified limit for the number of cases before using jump
3722 tables at higher optimization levels. */
3723 if (optimize > 2
3724 && selected_cpu->tune->max_case_values != 0)
3725 return selected_cpu->tune->max_case_values;
3726 else
e79136e4 3727 return optimize_size ? default_case_values_threshold () : 17;
50487d79
EM
3728}
3729
43e9d192
IB
3730/* Return true if register REGNO is a valid index register.
3731 STRICT_P is true if REG_OK_STRICT is in effect. */
3732
3733bool
3734aarch64_regno_ok_for_index_p (int regno, bool strict_p)
3735{
3736 if (!HARD_REGISTER_NUM_P (regno))
3737 {
3738 if (!strict_p)
3739 return true;
3740
3741 if (!reg_renumber)
3742 return false;
3743
3744 regno = reg_renumber[regno];
3745 }
3746 return GP_REGNUM_P (regno);
3747}
3748
3749/* Return true if register REGNO is a valid base register for mode MODE.
3750 STRICT_P is true if REG_OK_STRICT is in effect. */
3751
3752bool
3753aarch64_regno_ok_for_base_p (int regno, bool strict_p)
3754{
3755 if (!HARD_REGISTER_NUM_P (regno))
3756 {
3757 if (!strict_p)
3758 return true;
3759
3760 if (!reg_renumber)
3761 return false;
3762
3763 regno = reg_renumber[regno];
3764 }
3765
3766 /* The fake registers will be eliminated to either the stack or
3767 hard frame pointer, both of which are usually valid base registers.
3768 Reload deals with the cases where the eliminated form isn't valid. */
3769 return (GP_REGNUM_P (regno)
3770 || regno == SP_REGNUM
3771 || regno == FRAME_POINTER_REGNUM
3772 || regno == ARG_POINTER_REGNUM);
3773}
3774
3775/* Return true if X is a valid base register for mode MODE.
3776 STRICT_P is true if REG_OK_STRICT is in effect. */
3777
3778static bool
3779aarch64_base_register_rtx_p (rtx x, bool strict_p)
3780{
3781 if (!strict_p && GET_CODE (x) == SUBREG)
3782 x = SUBREG_REG (x);
3783
3784 return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
3785}
3786
3787/* Return true if address offset is a valid index. If it is, fill in INFO
3788 appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
3789
3790static bool
3791aarch64_classify_index (struct aarch64_address_info *info, rtx x,
ef4bddc2 3792 machine_mode mode, bool strict_p)
43e9d192
IB
3793{
3794 enum aarch64_address_type type;
3795 rtx index;
3796 int shift;
3797
3798 /* (reg:P) */
3799 if ((REG_P (x) || GET_CODE (x) == SUBREG)
3800 && GET_MODE (x) == Pmode)
3801 {
3802 type = ADDRESS_REG_REG;
3803 index = x;
3804 shift = 0;
3805 }
3806 /* (sign_extend:DI (reg:SI)) */
3807 else if ((GET_CODE (x) == SIGN_EXTEND
3808 || GET_CODE (x) == ZERO_EXTEND)
3809 && GET_MODE (x) == DImode
3810 && GET_MODE (XEXP (x, 0)) == SImode)
3811 {
3812 type = (GET_CODE (x) == SIGN_EXTEND)
3813 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3814 index = XEXP (x, 0);
3815 shift = 0;
3816 }
3817 /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
3818 else if (GET_CODE (x) == MULT
3819 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
3820 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
3821 && GET_MODE (XEXP (x, 0)) == DImode
3822 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
3823 && CONST_INT_P (XEXP (x, 1)))
3824 {
3825 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
3826 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3827 index = XEXP (XEXP (x, 0), 0);
3828 shift = exact_log2 (INTVAL (XEXP (x, 1)));
3829 }
3830 /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
3831 else if (GET_CODE (x) == ASHIFT
3832 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
3833 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
3834 && GET_MODE (XEXP (x, 0)) == DImode
3835 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
3836 && CONST_INT_P (XEXP (x, 1)))
3837 {
3838 type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
3839 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3840 index = XEXP (XEXP (x, 0), 0);
3841 shift = INTVAL (XEXP (x, 1));
3842 }
3843 /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
3844 else if ((GET_CODE (x) == SIGN_EXTRACT
3845 || GET_CODE (x) == ZERO_EXTRACT)
3846 && GET_MODE (x) == DImode
3847 && GET_CODE (XEXP (x, 0)) == MULT
3848 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3849 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
3850 {
3851 type = (GET_CODE (x) == SIGN_EXTRACT)
3852 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3853 index = XEXP (XEXP (x, 0), 0);
3854 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
3855 if (INTVAL (XEXP (x, 1)) != 32 + shift
3856 || INTVAL (XEXP (x, 2)) != 0)
3857 shift = -1;
3858 }
3859 /* (and:DI (mult:DI (reg:DI) (const_int scale))
3860 (const_int 0xffffffff<<shift)) */
3861 else if (GET_CODE (x) == AND
3862 && GET_MODE (x) == DImode
3863 && GET_CODE (XEXP (x, 0)) == MULT
3864 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3865 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3866 && CONST_INT_P (XEXP (x, 1)))
3867 {
3868 type = ADDRESS_REG_UXTW;
3869 index = XEXP (XEXP (x, 0), 0);
3870 shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
3871 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
3872 shift = -1;
3873 }
3874 /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
3875 else if ((GET_CODE (x) == SIGN_EXTRACT
3876 || GET_CODE (x) == ZERO_EXTRACT)
3877 && GET_MODE (x) == DImode
3878 && GET_CODE (XEXP (x, 0)) == ASHIFT
3879 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3880 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
3881 {
3882 type = (GET_CODE (x) == SIGN_EXTRACT)
3883 ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
3884 index = XEXP (XEXP (x, 0), 0);
3885 shift = INTVAL (XEXP (XEXP (x, 0), 1));
3886 if (INTVAL (XEXP (x, 1)) != 32 + shift
3887 || INTVAL (XEXP (x, 2)) != 0)
3888 shift = -1;
3889 }
3890 /* (and:DI (ashift:DI (reg:DI) (const_int shift))
3891 (const_int 0xffffffff<<shift)) */
3892 else if (GET_CODE (x) == AND
3893 && GET_MODE (x) == DImode
3894 && GET_CODE (XEXP (x, 0)) == ASHIFT
3895 && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
3896 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3897 && CONST_INT_P (XEXP (x, 1)))
3898 {
3899 type = ADDRESS_REG_UXTW;
3900 index = XEXP (XEXP (x, 0), 0);
3901 shift = INTVAL (XEXP (XEXP (x, 0), 1));
3902 if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
3903 shift = -1;
3904 }
3905 /* (mult:P (reg:P) (const_int scale)) */
3906 else if (GET_CODE (x) == MULT
3907 && GET_MODE (x) == Pmode
3908 && GET_MODE (XEXP (x, 0)) == Pmode
3909 && CONST_INT_P (XEXP (x, 1)))
3910 {
3911 type = ADDRESS_REG_REG;
3912 index = XEXP (x, 0);
3913 shift = exact_log2 (INTVAL (XEXP (x, 1)));
3914 }
3915 /* (ashift:P (reg:P) (const_int shift)) */
3916 else if (GET_CODE (x) == ASHIFT
3917 && GET_MODE (x) == Pmode
3918 && GET_MODE (XEXP (x, 0)) == Pmode
3919 && CONST_INT_P (XEXP (x, 1)))
3920 {
3921 type = ADDRESS_REG_REG;
3922 index = XEXP (x, 0);
3923 shift = INTVAL (XEXP (x, 1));
3924 }
3925 else
3926 return false;
3927
3928 if (GET_CODE (index) == SUBREG)
3929 index = SUBREG_REG (index);
3930
3931 if ((shift == 0 ||
3932 (shift > 0 && shift <= 3
3933 && (1 << shift) == GET_MODE_SIZE (mode)))
3934 && REG_P (index)
3935 && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
3936 {
3937 info->type = type;
3938 info->offset = index;
3939 info->shift = shift;
3940 return true;
3941 }
3942
3943 return false;
3944}
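/* For illustration, the index part of an address such as [x0, w1, sxtw 3]
   used for a DImode access reaches this function as

     (mult:DI (sign_extend:DI (reg:SI w1)) (const_int 8))

   and is classified as ADDRESS_REG_SXTW with shift 3; the shift is accepted
   because (1 << 3) matches the 8-byte access size.  */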
3945
44707478 3946bool
ef4bddc2 3947aarch64_offset_7bit_signed_scaled_p (machine_mode mode, HOST_WIDE_INT offset)
43e9d192
IB
3948{
3949 return (offset >= -64 * GET_MODE_SIZE (mode)
3950 && offset < 64 * GET_MODE_SIZE (mode)
3951 && offset % GET_MODE_SIZE (mode) == 0);
3952}
3953
3954static inline bool
ef4bddc2 3955offset_9bit_signed_unscaled_p (machine_mode mode ATTRIBUTE_UNUSED,
43e9d192
IB
3956 HOST_WIDE_INT offset)
3957{
3958 return offset >= -256 && offset < 256;
3959}
3960
3961static inline bool
ef4bddc2 3962offset_12bit_unsigned_scaled_p (machine_mode mode, HOST_WIDE_INT offset)
43e9d192
IB
3963{
3964 return (offset >= 0
3965 && offset < 4096 * GET_MODE_SIZE (mode)
3966 && offset % GET_MODE_SIZE (mode) == 0);
3967}
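/* For a DImode (8-byte) access the three predicates above accept,
   respectively:

     7-bit signed scaled      multiples of 8 in [-512, 504]   (LDP/STP)
     9-bit signed unscaled    any offset in [-256, 255]       (LDUR/STUR)
     12-bit unsigned scaled   multiples of 8 in [0, 32760]    (LDR/STR).  */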
3968
abc52318
KT
3969/* Return true if MODE is one of the modes for which we
3970 support LDP/STP operations. */
3971
3972static bool
3973aarch64_mode_valid_for_sched_fusion_p (machine_mode mode)
3974{
3975 return mode == SImode || mode == DImode
3976 || mode == SFmode || mode == DFmode
3977 || (aarch64_vector_mode_supported_p (mode)
3978 && GET_MODE_SIZE (mode) == 8);
3979}
3980
9e0218fc
RH
3981/* Return true if REGNO is a virtual pointer register, or an eliminable
3982 "soft" frame register. Like REGNO_PTR_FRAME_P except that we don't
3983 include stack_pointer or hard_frame_pointer. */
3984static bool
3985virt_or_elim_regno_p (unsigned regno)
3986{
3987 return ((regno >= FIRST_VIRTUAL_REGISTER
3988 && regno <= LAST_VIRTUAL_POINTER_REGISTER)
3989 || regno == FRAME_POINTER_REGNUM
3990 || regno == ARG_POINTER_REGNUM);
3991}
3992
43e9d192
IB
3993/* Return true if X is a valid address for machine mode MODE. If it is,
3994 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
3995 effect. OUTER_CODE is PARALLEL for a load/store pair. */
3996
3997static bool
3998aarch64_classify_address (struct aarch64_address_info *info,
ef4bddc2 3999 rtx x, machine_mode mode,
43e9d192
IB
4000 RTX_CODE outer_code, bool strict_p)
4001{
4002 enum rtx_code code = GET_CODE (x);
4003 rtx op0, op1;
2d8c6dc1
AH
4004
4005 /* On BE, we use load/store pair for all large int mode load/stores. */
4006 bool load_store_pair_p = (outer_code == PARALLEL
4007 || (BYTES_BIG_ENDIAN
4008 && aarch64_vect_struct_mode_p (mode)));
4009
43e9d192 4010 bool allow_reg_index_p =
2d8c6dc1
AH
4011 !load_store_pair_p
4012 && (GET_MODE_SIZE (mode) != 16 || aarch64_vector_mode_supported_p (mode))
4013 && !aarch64_vect_struct_mode_p (mode);
4014
4015 /* On LE, for AdvSIMD, don't support anything other than POST_INC or
4016 REG addressing. */
4017 if (aarch64_vect_struct_mode_p (mode) && !BYTES_BIG_ENDIAN
43e9d192
IB
4018 && (code != POST_INC && code != REG))
4019 return false;
4020
4021 switch (code)
4022 {
4023 case REG:
4024 case SUBREG:
4025 info->type = ADDRESS_REG_IMM;
4026 info->base = x;
4027 info->offset = const0_rtx;
4028 return aarch64_base_register_rtx_p (x, strict_p);
4029
4030 case PLUS:
4031 op0 = XEXP (x, 0);
4032 op1 = XEXP (x, 1);
15c0c5c9
JW
4033
4034 if (! strict_p
4aa81c2e 4035 && REG_P (op0)
9e0218fc 4036 && virt_or_elim_regno_p (REGNO (op0))
4aa81c2e 4037 && CONST_INT_P (op1))
15c0c5c9
JW
4038 {
4039 info->type = ADDRESS_REG_IMM;
4040 info->base = op0;
4041 info->offset = op1;
4042
4043 return true;
4044 }
4045
43e9d192
IB
4046 if (GET_MODE_SIZE (mode) != 0
4047 && CONST_INT_P (op1)
4048 && aarch64_base_register_rtx_p (op0, strict_p))
4049 {
4050 HOST_WIDE_INT offset = INTVAL (op1);
4051
4052 info->type = ADDRESS_REG_IMM;
4053 info->base = op0;
4054 info->offset = op1;
4055
4056 /* TImode and TFmode values are allowed in both pairs of X
4057 registers and individual Q registers. The available
4058 address modes are:
4059 X,X: 7-bit signed scaled offset
4060 Q: 9-bit signed offset
4061 We conservatively require an offset representable in either mode.
8ed49fab
KT
4062 When performing the check for pairs of X registers i.e. LDP/STP
4063 pass down DImode since that is the natural size of the LDP/STP
4064 instruction memory accesses. */
43e9d192 4065 if (mode == TImode || mode == TFmode)
8ed49fab 4066 return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
43e9d192
IB
4067 && offset_9bit_signed_unscaled_p (mode, offset));
4068
2d8c6dc1
AH
4069 /* A 7-bit offset check because OImode will emit an ldp/stp
4070 instruction (only big endian will get here).
4071 For ldp/stp instructions, the offset is scaled by the size of a
4072 single element of the pair. */
4073 if (mode == OImode)
4074 return aarch64_offset_7bit_signed_scaled_p (TImode, offset);
4075
4076 /* Three 9/12-bit offset checks because CImode will emit three
4077 ldr/str instructions (only big endian will get here). */
4078 if (mode == CImode)
4079 return (aarch64_offset_7bit_signed_scaled_p (TImode, offset)
4080 && (offset_9bit_signed_unscaled_p (V16QImode, offset + 32)
4081 || offset_12bit_unsigned_scaled_p (V16QImode,
4082 offset + 32)));
4083
4084 /* Two 7-bit offset checks because XImode will emit two ldp/stp
4085 instructions (only big endian will get here). */
4086 if (mode == XImode)
4087 return (aarch64_offset_7bit_signed_scaled_p (TImode, offset)
4088 && aarch64_offset_7bit_signed_scaled_p (TImode,
4089 offset + 32));
4090
4091 if (load_store_pair_p)
43e9d192 4092 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
44707478 4093 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
43e9d192
IB
4094 else
4095 return (offset_9bit_signed_unscaled_p (mode, offset)
4096 || offset_12bit_unsigned_scaled_p (mode, offset));
4097 }
4098
4099 if (allow_reg_index_p)
4100 {
4101 /* Look for base + (scaled/extended) index register. */
4102 if (aarch64_base_register_rtx_p (op0, strict_p)
4103 && aarch64_classify_index (info, op1, mode, strict_p))
4104 {
4105 info->base = op0;
4106 return true;
4107 }
4108 if (aarch64_base_register_rtx_p (op1, strict_p)
4109 && aarch64_classify_index (info, op0, mode, strict_p))
4110 {
4111 info->base = op1;
4112 return true;
4113 }
4114 }
4115
4116 return false;
4117
4118 case POST_INC:
4119 case POST_DEC:
4120 case PRE_INC:
4121 case PRE_DEC:
4122 info->type = ADDRESS_REG_WB;
4123 info->base = XEXP (x, 0);
4124 info->offset = NULL_RTX;
4125 return aarch64_base_register_rtx_p (info->base, strict_p);
4126
4127 case POST_MODIFY:
4128 case PRE_MODIFY:
4129 info->type = ADDRESS_REG_WB;
4130 info->base = XEXP (x, 0);
4131 if (GET_CODE (XEXP (x, 1)) == PLUS
4132 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
4133 && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
4134 && aarch64_base_register_rtx_p (info->base, strict_p))
4135 {
4136 HOST_WIDE_INT offset;
4137 info->offset = XEXP (XEXP (x, 1), 1);
4138 offset = INTVAL (info->offset);
4139
4140 /* TImode and TFmode values are allowed in both pairs of X
4141 registers and individual Q registers. The available
4142 address modes are:
4143 X,X: 7-bit signed scaled offset
4144 Q: 9-bit signed offset
4145 We conservatively require an offset representable in either mode.
4146 */
4147 if (mode == TImode || mode == TFmode)
44707478 4148 return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
43e9d192
IB
4149 && offset_9bit_signed_unscaled_p (mode, offset));
4150
2d8c6dc1 4151 if (load_store_pair_p)
43e9d192 4152 return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
44707478 4153 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
43e9d192
IB
4154 else
4155 return offset_9bit_signed_unscaled_p (mode, offset);
4156 }
4157 return false;
4158
4159 case CONST:
4160 case SYMBOL_REF:
4161 case LABEL_REF:
79517551
SN
4162 /* Load literal: PC-relative constant pool entry. Only supported
4163 for SImode or larger. */
43e9d192 4164 info->type = ADDRESS_SYMBOLIC;
2d8c6dc1
AH
4165
4166 if (!load_store_pair_p && GET_MODE_SIZE (mode) >= 4)
43e9d192
IB
4167 {
4168 rtx sym, addend;
4169
4170 split_const (x, &sym, &addend);
b4f50fd4
RR
4171 return ((GET_CODE (sym) == LABEL_REF
4172 || (GET_CODE (sym) == SYMBOL_REF
4173 && CONSTANT_POOL_ADDRESS_P (sym)
9ee6540a 4174 && aarch64_pcrelative_literal_loads)));
43e9d192
IB
4175 }
4176 return false;
4177
4178 case LO_SUM:
4179 info->type = ADDRESS_LO_SUM;
4180 info->base = XEXP (x, 0);
4181 info->offset = XEXP (x, 1);
4182 if (allow_reg_index_p
4183 && aarch64_base_register_rtx_p (info->base, strict_p))
4184 {
4185 rtx sym, offs;
4186 split_const (info->offset, &sym, &offs);
4187 if (GET_CODE (sym) == SYMBOL_REF
a6e0bfa7 4188 && (aarch64_classify_symbol (sym, offs) == SYMBOL_SMALL_ABSOLUTE))
43e9d192
IB
4189 {
4190 /* The symbol and offset must be aligned to the access size. */
4191 unsigned int align;
4192 unsigned int ref_size;
4193
4194 if (CONSTANT_POOL_ADDRESS_P (sym))
4195 align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
4196 else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
4197 {
4198 tree exp = SYMBOL_REF_DECL (sym);
4199 align = TYPE_ALIGN (TREE_TYPE (exp));
4200 align = CONSTANT_ALIGNMENT (exp, align);
4201 }
4202 else if (SYMBOL_REF_DECL (sym))
4203 align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
6c031d8d
KV
4204 else if (SYMBOL_REF_HAS_BLOCK_INFO_P (sym)
4205 && SYMBOL_REF_BLOCK (sym) != NULL)
4206 align = SYMBOL_REF_BLOCK (sym)->alignment;
43e9d192
IB
4207 else
4208 align = BITS_PER_UNIT;
4209
4210 ref_size = GET_MODE_SIZE (mode);
4211 if (ref_size == 0)
4212 ref_size = GET_MODE_SIZE (DImode);
4213
4214 return ((INTVAL (offs) & (ref_size - 1)) == 0
4215 && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
4216 }
4217 }
4218 return false;
4219
4220 default:
4221 return false;
4222 }
4223}
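/* One consequence of the TImode/TFmode handling above: a base + immediate
   TImode address must pass both the 7-bit signed scaled check (done with
   DImode, i.e. a multiple of 8 in [-512, 504]) and the 9-bit signed
   unscaled check ([-256, 255]), so only offsets that are multiples of 8
   in [-256, 248] are accepted.  */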
4224
4225bool
4226aarch64_symbolic_address_p (rtx x)
4227{
4228 rtx offset;
4229
4230 split_const (x, &x, &offset);
4231 return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
4232}
4233
a6e0bfa7 4234/* Classify the base of symbolic expression X. */
da4f13a4
MS
4235
4236enum aarch64_symbol_type
a6e0bfa7 4237aarch64_classify_symbolic_expression (rtx x)
43e9d192
IB
4238{
4239 rtx offset;
da4f13a4 4240
43e9d192 4241 split_const (x, &x, &offset);
a6e0bfa7 4242 return aarch64_classify_symbol (x, offset);
43e9d192
IB
4243}
4244
4245
4246/* Return TRUE if X is a legitimate address for accessing memory in
4247 mode MODE. */
4248static bool
ef4bddc2 4249aarch64_legitimate_address_hook_p (machine_mode mode, rtx x, bool strict_p)
43e9d192
IB
4250{
4251 struct aarch64_address_info addr;
4252
4253 return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
4254}
4255
4256/* Return TRUE if X is a legitimate address for accessing memory in
4257 mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
4258 pair operation. */
4259bool
ef4bddc2 4260aarch64_legitimate_address_p (machine_mode mode, rtx x,
aef66c94 4261 RTX_CODE outer_code, bool strict_p)
43e9d192
IB
4262{
4263 struct aarch64_address_info addr;
4264
4265 return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
4266}
4267
491ec060
WD
4268/* Split an out-of-range address displacement into a base and offset.
4269 Use 4KB range for 1- and 2-byte accesses and a 16KB range otherwise
4270 to increase opportunities for sharing the base address across access sizes.
4271 For TI/TFmode and unaligned accesses use a 256-byte range. */
4272static bool
4273aarch64_legitimize_address_displacement (rtx *disp, rtx *off, machine_mode mode)
4274{
4275 HOST_WIDE_INT mask = GET_MODE_SIZE (mode) < 4 ? 0xfff : 0x3fff;
4276
4277 if (mode == TImode || mode == TFmode ||
4278 (INTVAL (*disp) & (GET_MODE_SIZE (mode) - 1)) != 0)
4279 mask = 0xff;
4280
4281 *off = GEN_INT (INTVAL (*disp) & ~mask);
4282 *disp = GEN_INT (INTVAL (*disp) & mask);
4283 return true;
4284}
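/* For example, for an aligned DImode access the mask is 0x3fff, so an
   out-of-range displacement of 0x10008 is split into a base part of
   0x10000 (returned in *off) and a residual displacement of 0x8, which
   is then reachable with an ordinary scaled immediate.  */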
4285
43e9d192
IB
4286/* Return TRUE if rtx X is the immediate constant 0.0. */
4287bool
3520f7cc 4288aarch64_float_const_zero_rtx_p (rtx x)
43e9d192 4289{
43e9d192
IB
4290 if (GET_MODE (x) == VOIDmode)
4291 return false;
4292
34a72c33 4293 if (REAL_VALUE_MINUS_ZERO (*CONST_DOUBLE_REAL_VALUE (x)))
43e9d192 4294 return !HONOR_SIGNED_ZEROS (GET_MODE (x));
34a72c33 4295 return real_equal (CONST_DOUBLE_REAL_VALUE (x), &dconst0);
43e9d192
IB
4296}
4297
70f09188
AP
4298/* Return the fixed registers used for condition codes. */
4299
4300static bool
4301aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
4302{
4303 *p1 = CC_REGNUM;
4304 *p2 = INVALID_REGNUM;
4305 return true;
4306}
4307
78607708
TV
4308/* Emit call insn with PAT and do aarch64-specific handling. */
4309
d07a3fed 4310void
78607708
TV
4311aarch64_emit_call_insn (rtx pat)
4312{
4313 rtx insn = emit_call_insn (pat);
4314
4315 rtx *fusage = &CALL_INSN_FUNCTION_USAGE (insn);
4316 clobber_reg (fusage, gen_rtx_REG (word_mode, IP0_REGNUM));
4317 clobber_reg (fusage, gen_rtx_REG (word_mode, IP1_REGNUM));
4318}
4319
ef4bddc2 4320machine_mode
43e9d192
IB
4321aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
4322{
4323 /* All floating point compares return CCFP if it is an equality
4324 comparison, and CCFPE otherwise. */
4325 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
4326 {
4327 switch (code)
4328 {
4329 case EQ:
4330 case NE:
4331 case UNORDERED:
4332 case ORDERED:
4333 case UNLT:
4334 case UNLE:
4335 case UNGT:
4336 case UNGE:
4337 case UNEQ:
4338 case LTGT:
4339 return CCFPmode;
4340
4341 case LT:
4342 case LE:
4343 case GT:
4344 case GE:
4345 return CCFPEmode;
4346
4347 default:
4348 gcc_unreachable ();
4349 }
4350 }
4351
2b8568fe
KT
4352 /* Equality comparisons of short modes against zero can be performed
4353 using the TST instruction with the appropriate bitmask. */
4354 if (y == const0_rtx && REG_P (x)
4355 && (code == EQ || code == NE)
4356 && (GET_MODE (x) == HImode || GET_MODE (x) == QImode))
4357 return CC_NZmode;
4358
b06335f9
KT
4359 /* Similarly, comparisons of zero_extends from shorter modes can
4360 be performed using an ANDS with an immediate mask. */
4361 if (y == const0_rtx && GET_CODE (x) == ZERO_EXTEND
4362 && (GET_MODE (x) == SImode || GET_MODE (x) == DImode)
4363 && (GET_MODE (XEXP (x, 0)) == HImode || GET_MODE (XEXP (x, 0)) == QImode)
4364 && (code == EQ || code == NE))
4365 return CC_NZmode;
4366
43e9d192
IB
4367 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
4368 && y == const0_rtx
4369 && (code == EQ || code == NE || code == LT || code == GE)
b056c910 4370 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
7325d85a
KT
4371 || GET_CODE (x) == NEG
4372 || (GET_CODE (x) == ZERO_EXTRACT && CONST_INT_P (XEXP (x, 1))
4373 && CONST_INT_P (XEXP (x, 2)))))
43e9d192
IB
4374 return CC_NZmode;
4375
1c992d1e 4376 /* A compare with a shifted operand. Because of canonicalization,
43e9d192
IB
4377 the comparison will have to be swapped when we emit the assembly
4378 code. */
4379 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
4aa81c2e 4380 && (REG_P (y) || GET_CODE (y) == SUBREG)
43e9d192
IB
4381 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
4382 || GET_CODE (x) == LSHIFTRT
1c992d1e 4383 || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
43e9d192
IB
4384 return CC_SWPmode;
4385
1c992d1e
RE
4386 /* Similarly for a negated operand, but we can only do this for
4387 equalities. */
4388 if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
4aa81c2e 4389 && (REG_P (y) || GET_CODE (y) == SUBREG)
1c992d1e
RE
4390 && (code == EQ || code == NE)
4391 && GET_CODE (x) == NEG)
4392 return CC_Zmode;
4393
ef22810a
RH
4394 /* A test for unsigned overflow. */
4395 if ((GET_MODE (x) == DImode || GET_MODE (x) == TImode)
4396 && code == NE
4397 && GET_CODE (x) == PLUS
4398 && GET_CODE (y) == ZERO_EXTEND)
4399 return CC_Cmode;
4400
43e9d192
IB
4401 /* For everything else, return CCmode. */
4402 return CCmode;
4403}
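/* For example, a comparison whose first operand is shifted, such as

     (compare (ashift (reg:DI x1) (const_int 2)) (reg:DI x0))

   gets CC_SWPmode: the assembly is emitted with the operands the other way
   round (cmp x0, x1, lsl 2), and the CC_SWPmode entries in
   aarch64_get_condition_code_1 below map each condition to its swapped
   counterpart (GT is printed as lt, and so on).  */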
4404
3dfa7055
ZC
4405static int
4406aarch64_get_condition_code_1 (enum machine_mode, enum rtx_code);
4407
cd5660ab 4408int
43e9d192
IB
4409aarch64_get_condition_code (rtx x)
4410{
ef4bddc2 4411 machine_mode mode = GET_MODE (XEXP (x, 0));
43e9d192
IB
4412 enum rtx_code comp_code = GET_CODE (x);
4413
4414 if (GET_MODE_CLASS (mode) != MODE_CC)
4415 mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
3dfa7055
ZC
4416 return aarch64_get_condition_code_1 (mode, comp_code);
4417}
43e9d192 4418
3dfa7055
ZC
4419static int
4420aarch64_get_condition_code_1 (enum machine_mode mode, enum rtx_code comp_code)
4421{
43e9d192
IB
4422 switch (mode)
4423 {
4424 case CCFPmode:
4425 case CCFPEmode:
4426 switch (comp_code)
4427 {
4428 case GE: return AARCH64_GE;
4429 case GT: return AARCH64_GT;
4430 case LE: return AARCH64_LS;
4431 case LT: return AARCH64_MI;
4432 case NE: return AARCH64_NE;
4433 case EQ: return AARCH64_EQ;
4434 case ORDERED: return AARCH64_VC;
4435 case UNORDERED: return AARCH64_VS;
4436 case UNLT: return AARCH64_LT;
4437 case UNLE: return AARCH64_LE;
4438 case UNGT: return AARCH64_HI;
4439 case UNGE: return AARCH64_PL;
cd5660ab 4440 default: return -1;
43e9d192
IB
4441 }
4442 break;
4443
4444 case CCmode:
4445 switch (comp_code)
4446 {
4447 case NE: return AARCH64_NE;
4448 case EQ: return AARCH64_EQ;
4449 case GE: return AARCH64_GE;
4450 case GT: return AARCH64_GT;
4451 case LE: return AARCH64_LE;
4452 case LT: return AARCH64_LT;
4453 case GEU: return AARCH64_CS;
4454 case GTU: return AARCH64_HI;
4455 case LEU: return AARCH64_LS;
4456 case LTU: return AARCH64_CC;
cd5660ab 4457 default: return -1;
43e9d192
IB
4458 }
4459 break;
4460
4461 case CC_SWPmode:
43e9d192
IB
4462 switch (comp_code)
4463 {
4464 case NE: return AARCH64_NE;
4465 case EQ: return AARCH64_EQ;
4466 case GE: return AARCH64_LE;
4467 case GT: return AARCH64_LT;
4468 case LE: return AARCH64_GE;
4469 case LT: return AARCH64_GT;
4470 case GEU: return AARCH64_LS;
4471 case GTU: return AARCH64_CC;
4472 case LEU: return AARCH64_CS;
4473 case LTU: return AARCH64_HI;
cd5660ab 4474 default: return -1;
43e9d192
IB
4475 }
4476 break;
4477
4478 case CC_NZmode:
4479 switch (comp_code)
4480 {
4481 case NE: return AARCH64_NE;
4482 case EQ: return AARCH64_EQ;
4483 case GE: return AARCH64_PL;
4484 case LT: return AARCH64_MI;
cd5660ab 4485 default: return -1;
43e9d192
IB
4486 }
4487 break;
4488
1c992d1e
RE
4489 case CC_Zmode:
4490 switch (comp_code)
4491 {
4492 case NE: return AARCH64_NE;
4493 case EQ: return AARCH64_EQ;
cd5660ab 4494 default: return -1;
1c992d1e
RE
4495 }
4496 break;
4497
ef22810a
RH
4498 case CC_Cmode:
4499 switch (comp_code)
4500 {
4501 case NE: return AARCH64_CS;
4502 case EQ: return AARCH64_CC;
4503 default: return -1;
4504 }
4505 break;
4506
43e9d192 4507 default:
cd5660ab 4508 return -1;
43e9d192 4509 }
3dfa7055 4510
3dfa7055 4511 return -1;
43e9d192
IB
4512}
4513
ddeabd3e
AL
4514bool
4515aarch64_const_vec_all_same_in_range_p (rtx x,
4516 HOST_WIDE_INT minval,
4517 HOST_WIDE_INT maxval)
4518{
4519 HOST_WIDE_INT firstval;
4520 int count, i;
4521
4522 if (GET_CODE (x) != CONST_VECTOR
4523 || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
4524 return false;
4525
4526 firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
4527 if (firstval < minval || firstval > maxval)
4528 return false;
4529
4530 count = CONST_VECTOR_NUNITS (x);
4531 for (i = 1; i < count; i++)
4532 if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
4533 return false;
4534
4535 return true;
4536}
4537
4538bool
4539aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
4540{
4541 return aarch64_const_vec_all_same_in_range_p (x, val, val);
4542}
4543
43e9d192 4544
cf670503
ZC
4545/* N Z C V. */
4546#define AARCH64_CC_V 1
4547#define AARCH64_CC_C (1 << 1)
4548#define AARCH64_CC_Z (1 << 2)
4549#define AARCH64_CC_N (1 << 3)
4550
c8012fbc
WD
4551/* N Z C V flags for ccmp. Indexed by AARCH64_COND_CODE. */
4552static const int aarch64_nzcv_codes[] =
4553{
4554 0, /* EQ, Z == 1. */
4555 AARCH64_CC_Z, /* NE, Z == 0. */
4556 0, /* CS, C == 1. */
4557 AARCH64_CC_C, /* CC, C == 0. */
4558 0, /* MI, N == 1. */
4559 AARCH64_CC_N, /* PL, N == 0. */
4560 0, /* VS, V == 1. */
4561 AARCH64_CC_V, /* VC, V == 0. */
4562 0, /* HI, C ==1 && Z == 0. */
4563 AARCH64_CC_C, /* LS, !(C == 1 && Z == 0). */
4564 AARCH64_CC_V, /* GE, N == V. */
4565 0, /* LT, N != V. */
4566 AARCH64_CC_Z, /* GT, Z == 0 && N == V. */
4567 0, /* LE, !(Z == 0 && N == V). */
4568 0, /* AL, Any. */
4569 0 /* NV, Any. */
cf670503
ZC
4570};
4571
cc8ca59e
JB
4572static void
4573aarch64_print_operand (FILE *f, rtx x, int code)
43e9d192
IB
4574{
4575 switch (code)
4576 {
f541a481
KT
4577 /* An integer or symbol address without a preceding # sign. */
4578 case 'c':
4579 switch (GET_CODE (x))
4580 {
4581 case CONST_INT:
4582 fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
4583 break;
4584
4585 case SYMBOL_REF:
4586 output_addr_const (f, x);
4587 break;
4588
4589 case CONST:
4590 if (GET_CODE (XEXP (x, 0)) == PLUS
4591 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
4592 {
4593 output_addr_const (f, x);
4594 break;
4595 }
4596 /* Fall through. */
4597
4598 default:
4599 output_operand_lossage ("Unsupported operand for code '%c'", code);
4600 }
4601 break;
4602
43e9d192
IB
4603 case 'e':
4604 /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
4605 {
4606 int n;
4607
4aa81c2e 4608 if (!CONST_INT_P (x)
43e9d192
IB
4609 || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
4610 {
4611 output_operand_lossage ("invalid operand for '%%%c'", code);
4612 return;
4613 }
4614
4615 switch (n)
4616 {
4617 case 3:
4618 fputc ('b', f);
4619 break;
4620 case 4:
4621 fputc ('h', f);
4622 break;
4623 case 5:
4624 fputc ('w', f);
4625 break;
4626 default:
4627 output_operand_lossage ("invalid operand for '%%%c'", code);
4628 return;
4629 }
4630 }
4631 break;
4632
4633 case 'p':
4634 {
4635 int n;
4636
4637 /* Print N such that 2^N == X. */
4aa81c2e 4638 if (!CONST_INT_P (x) || (n = exact_log2 (INTVAL (x))) < 0)
43e9d192
IB
4639 {
4640 output_operand_lossage ("invalid operand for '%%%c'", code);
4641 return;
4642 }
4643
4644 asm_fprintf (f, "%d", n);
4645 }
4646 break;
4647
4648 case 'P':
4649 /* Print the number of non-zero bits in X (a const_int). */
4aa81c2e 4650 if (!CONST_INT_P (x))
43e9d192
IB
4651 {
4652 output_operand_lossage ("invalid operand for '%%%c'", code);
4653 return;
4654 }
4655
8d55c61b 4656 asm_fprintf (f, "%u", popcount_hwi (INTVAL (x)));
43e9d192
IB
4657 break;
4658
4659 case 'H':
4660 /* Print the higher numbered register of a pair (TImode) of regs. */
4aa81c2e 4661 if (!REG_P (x) || !GP_REGNUM_P (REGNO (x) + 1))
43e9d192
IB
4662 {
4663 output_operand_lossage ("invalid operand for '%%%c'", code);
4664 return;
4665 }
4666
01a3a324 4667 asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]);
43e9d192
IB
4668 break;
4669
43e9d192 4670 case 'M':
c8012fbc 4671 case 'm':
cd5660ab
KT
4672 {
4673 int cond_code;
c8012fbc 4674 /* Print a condition (eq, ne, etc) or its inverse. */
43e9d192 4675
c8012fbc
WD
4676 /* CONST_TRUE_RTX means al/nv (al is the default, don't print it). */
4677 if (x == const_true_rtx)
cd5660ab 4678 {
c8012fbc
WD
4679 if (code == 'M')
4680 fputs ("nv", f);
cd5660ab
KT
4681 return;
4682 }
43e9d192 4683
cd5660ab
KT
4684 if (!COMPARISON_P (x))
4685 {
4686 output_operand_lossage ("invalid operand for '%%%c'", code);
4687 return;
4688 }
c8012fbc 4689
cd5660ab
KT
4690 cond_code = aarch64_get_condition_code (x);
4691 gcc_assert (cond_code >= 0);
c8012fbc
WD
4692 if (code == 'M')
4693 cond_code = AARCH64_INVERSE_CONDITION_CODE (cond_code);
4694 fputs (aarch64_condition_codes[cond_code], f);
cd5660ab 4695 }
43e9d192
IB
4696 break;
4697
4698 case 'b':
4699 case 'h':
4700 case 's':
4701 case 'd':
4702 case 'q':
4703 /* Print a scalar FP/SIMD register name. */
4704 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
4705 {
4706 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
4707 return;
4708 }
50ce6f88 4709 asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM);
43e9d192
IB
4710 break;
4711
4712 case 'S':
4713 case 'T':
4714 case 'U':
4715 case 'V':
4716 /* Print the first FP/SIMD register name in a list. */
4717 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
4718 {
4719 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
4720 return;
4721 }
50ce6f88 4722 asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S'));
43e9d192
IB
4723 break;
4724
2d8c6dc1
AH
4725 case 'R':
4726 /* Print a scalar FP/SIMD register name + 1. */
4727 if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
4728 {
4729 output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
4730 return;
4731 }
4732 asm_fprintf (f, "q%d", REGNO (x) - V0_REGNUM + 1);
4733 break;
4734
a05c0ddf 4735 case 'X':
50d38551 4736 /* Print bottom 16 bits of integer constant in hex. */
4aa81c2e 4737 if (!CONST_INT_P (x))
a05c0ddf
IB
4738 {
4739 output_operand_lossage ("invalid operand for '%%%c'", code);
4740 return;
4741 }
50d38551 4742 asm_fprintf (f, "0x%wx", UINTVAL (x) & 0xffff);
a05c0ddf
IB
4743 break;
4744
43e9d192
IB
4745 case 'w':
4746 case 'x':
4747 /* Print a general register name or the zero register (32-bit or
4748 64-bit). */
3520f7cc
JG
4749 if (x == const0_rtx
4750 || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x)))
43e9d192 4751 {
50ce6f88 4752 asm_fprintf (f, "%czr", code);
43e9d192
IB
4753 break;
4754 }
4755
4756 if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
4757 {
50ce6f88 4758 asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM);
43e9d192
IB
4759 break;
4760 }
4761
4762 if (REG_P (x) && REGNO (x) == SP_REGNUM)
4763 {
50ce6f88 4764 asm_fprintf (f, "%ssp", code == 'w' ? "w" : "");
43e9d192
IB
4765 break;
4766 }
4767
4768 /* Fall through */
4769
4770 case 0:
4771 /* Print a normal operand. If it's a general register, then we
4772 assume DImode. */
4773 if (x == NULL)
4774 {
4775 output_operand_lossage ("missing operand");
4776 return;
4777 }
4778
4779 switch (GET_CODE (x))
4780 {
4781 case REG:
01a3a324 4782 asm_fprintf (f, "%s", reg_names [REGNO (x)]);
43e9d192
IB
4783 break;
4784
4785 case MEM:
cc8ca59e 4786 output_address (GET_MODE (x), XEXP (x, 0));
43e9d192
IB
4787 break;
4788
2af16a7c 4789 case CONST:
43e9d192
IB
4790 case LABEL_REF:
4791 case SYMBOL_REF:
4792 output_addr_const (asm_out_file, x);
4793 break;
4794
4795 case CONST_INT:
4796 asm_fprintf (f, "%wd", INTVAL (x));
4797 break;
4798
4799 case CONST_VECTOR:
3520f7cc
JG
4800 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT)
4801 {
ddeabd3e
AL
4802 gcc_assert (
4803 aarch64_const_vec_all_same_in_range_p (x,
4804 HOST_WIDE_INT_MIN,
4805 HOST_WIDE_INT_MAX));
3520f7cc
JG
4806 asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
4807 }
4808 else if (aarch64_simd_imm_zero_p (x, GET_MODE (x)))
4809 {
4810 fputc ('0', f);
4811 }
4812 else
4813 gcc_unreachable ();
43e9d192
IB
4814 break;
4815
3520f7cc 4816 case CONST_DOUBLE:
2ca5b430
KT
4817 /* Since we define TARGET_SUPPORTS_WIDE_INT we shouldn't ever
4818 be getting CONST_DOUBLEs holding integers. */
4819 gcc_assert (GET_MODE (x) != VOIDmode);
4820 if (aarch64_float_const_zero_rtx_p (x))
3520f7cc
JG
4821 {
4822 fputc ('0', f);
4823 break;
4824 }
4825 else if (aarch64_float_const_representable_p (x))
4826 {
4827#define buf_size 20
4828 char float_buf[buf_size] = {'\0'};
34a72c33
RS
4829 real_to_decimal_for_mode (float_buf,
4830 CONST_DOUBLE_REAL_VALUE (x),
3520f7cc
JG
4831 buf_size, buf_size,
4832 1, GET_MODE (x));
4833 asm_fprintf (asm_out_file, "%s", float_buf);
4834 break;
4835#undef buf_size
4836 }
4837 output_operand_lossage ("invalid constant");
4838 return;
43e9d192
IB
4839 default:
4840 output_operand_lossage ("invalid operand");
4841 return;
4842 }
4843 break;
4844
4845 case 'A':
4846 if (GET_CODE (x) == HIGH)
4847 x = XEXP (x, 0);
4848
a6e0bfa7 4849 switch (aarch64_classify_symbolic_expression (x))
43e9d192 4850 {
6642bdb4 4851 case SYMBOL_SMALL_GOT_4G:
43e9d192
IB
4852 asm_fprintf (asm_out_file, ":got:");
4853 break;
4854
4855 case SYMBOL_SMALL_TLSGD:
4856 asm_fprintf (asm_out_file, ":tlsgd:");
4857 break;
4858
4859 case SYMBOL_SMALL_TLSDESC:
4860 asm_fprintf (asm_out_file, ":tlsdesc:");
4861 break;
4862
79496620 4863 case SYMBOL_SMALL_TLSIE:
43e9d192
IB
4864 asm_fprintf (asm_out_file, ":gottprel:");
4865 break;
4866
d18ba284 4867 case SYMBOL_TLSLE24:
43e9d192
IB
4868 asm_fprintf (asm_out_file, ":tprel:");
4869 break;
4870
87dd8ab0
MS
4871 case SYMBOL_TINY_GOT:
4872 gcc_unreachable ();
4873 break;
4874
43e9d192
IB
4875 default:
4876 break;
4877 }
4878 output_addr_const (asm_out_file, x);
4879 break;
4880
4881 case 'L':
a6e0bfa7 4882 switch (aarch64_classify_symbolic_expression (x))
43e9d192 4883 {
6642bdb4 4884 case SYMBOL_SMALL_GOT_4G:
43e9d192
IB
4885 asm_fprintf (asm_out_file, ":lo12:");
4886 break;
4887
4888 case SYMBOL_SMALL_TLSGD:
4889 asm_fprintf (asm_out_file, ":tlsgd_lo12:");
4890 break;
4891
4892 case SYMBOL_SMALL_TLSDESC:
4893 asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
4894 break;
4895
79496620 4896 case SYMBOL_SMALL_TLSIE:
43e9d192
IB
4897 asm_fprintf (asm_out_file, ":gottprel_lo12:");
4898 break;
4899
cbf5629e
JW
4900 case SYMBOL_TLSLE12:
4901 asm_fprintf (asm_out_file, ":tprel_lo12:");
4902 break;
4903
d18ba284 4904 case SYMBOL_TLSLE24:
43e9d192
IB
4905 asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
4906 break;
4907
87dd8ab0
MS
4908 case SYMBOL_TINY_GOT:
4909 asm_fprintf (asm_out_file, ":got:");
4910 break;
4911
5ae7caad
JW
4912 case SYMBOL_TINY_TLSIE:
4913 asm_fprintf (asm_out_file, ":gottprel:");
4914 break;
4915
43e9d192
IB
4916 default:
4917 break;
4918 }
4919 output_addr_const (asm_out_file, x);
4920 break;
4921
4922 case 'G':
4923
a6e0bfa7 4924 switch (aarch64_classify_symbolic_expression (x))
43e9d192 4925 {
d18ba284 4926 case SYMBOL_TLSLE24:
43e9d192
IB
4927 asm_fprintf (asm_out_file, ":tprel_hi12:");
4928 break;
4929 default:
4930 break;
4931 }
4932 output_addr_const (asm_out_file, x);
4933 break;
4934
cf670503
ZC
4935 case 'k':
4936 {
c8012fbc 4937 HOST_WIDE_INT cond_code;
cf670503
ZC
4938 /* Print nzcv. */
4939
c8012fbc 4940 if (!CONST_INT_P (x))
cf670503
ZC
4941 {
4942 output_operand_lossage ("invalid operand for '%%%c'", code);
4943 return;
4944 }
4945
c8012fbc
WD
4946 cond_code = INTVAL (x);
4947 gcc_assert (cond_code >= 0 && cond_code <= AARCH64_NV);
4948 asm_fprintf (f, "%d", aarch64_nzcv_codes[cond_code]);
cf670503
ZC
4949 }
4950 break;
4951
43e9d192
IB
4952 default:
4953 output_operand_lossage ("invalid operand prefix '%%%c'", code);
4954 return;
4955 }
4956}
4957
cc8ca59e
JB
4958static void
4959aarch64_print_operand_address (FILE *f, machine_mode mode, rtx x)
43e9d192
IB
4960{
4961 struct aarch64_address_info addr;
4962
cc8ca59e 4963 if (aarch64_classify_address (&addr, x, mode, MEM, true))
43e9d192
IB
4964 switch (addr.type)
4965 {
4966 case ADDRESS_REG_IMM:
4967 if (addr.offset == const0_rtx)
01a3a324 4968 asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]);
43e9d192 4969 else
16a3246f 4970 asm_fprintf (f, "[%s, %wd]", reg_names [REGNO (addr.base)],
43e9d192
IB
4971 INTVAL (addr.offset));
4972 return;
4973
4974 case ADDRESS_REG_REG:
4975 if (addr.shift == 0)
16a3246f 4976 asm_fprintf (f, "[%s, %s]", reg_names [REGNO (addr.base)],
01a3a324 4977 reg_names [REGNO (addr.offset)]);
43e9d192 4978 else
16a3246f 4979 asm_fprintf (f, "[%s, %s, lsl %u]", reg_names [REGNO (addr.base)],
01a3a324 4980 reg_names [REGNO (addr.offset)], addr.shift);
43e9d192
IB
4981 return;
4982
4983 case ADDRESS_REG_UXTW:
4984 if (addr.shift == 0)
16a3246f 4985 asm_fprintf (f, "[%s, w%d, uxtw]", reg_names [REGNO (addr.base)],
43e9d192
IB
4986 REGNO (addr.offset) - R0_REGNUM);
4987 else
16a3246f 4988 asm_fprintf (f, "[%s, w%d, uxtw %u]", reg_names [REGNO (addr.base)],
43e9d192
IB
4989 REGNO (addr.offset) - R0_REGNUM, addr.shift);
4990 return;
4991
4992 case ADDRESS_REG_SXTW:
4993 if (addr.shift == 0)
16a3246f 4994 asm_fprintf (f, "[%s, w%d, sxtw]", reg_names [REGNO (addr.base)],
43e9d192
IB
4995 REGNO (addr.offset) - R0_REGNUM);
4996 else
16a3246f 4997 asm_fprintf (f, "[%s, w%d, sxtw %u]", reg_names [REGNO (addr.base)],
43e9d192
IB
4998 REGNO (addr.offset) - R0_REGNUM, addr.shift);
4999 return;
5000
5001 case ADDRESS_REG_WB:
5002 switch (GET_CODE (x))
5003 {
5004 case PRE_INC:
16a3246f 5005 asm_fprintf (f, "[%s, %d]!", reg_names [REGNO (addr.base)],
cc8ca59e 5006 GET_MODE_SIZE (mode));
43e9d192
IB
5007 return;
5008 case POST_INC:
16a3246f 5009 asm_fprintf (f, "[%s], %d", reg_names [REGNO (addr.base)],
cc8ca59e 5010 GET_MODE_SIZE (mode));
43e9d192
IB
5011 return;
5012 case PRE_DEC:
16a3246f 5013 asm_fprintf (f, "[%s, -%d]!", reg_names [REGNO (addr.base)],
cc8ca59e 5014 GET_MODE_SIZE (mode));
43e9d192
IB
5015 return;
5016 case POST_DEC:
16a3246f 5017 asm_fprintf (f, "[%s], -%d", reg_names [REGNO (addr.base)],
cc8ca59e 5018 GET_MODE_SIZE (mode));
43e9d192
IB
5019 return;
5020 case PRE_MODIFY:
16a3246f 5021 asm_fprintf (f, "[%s, %wd]!", reg_names [REGNO (addr.base)],
43e9d192
IB
5022 INTVAL (addr.offset));
5023 return;
5024 case POST_MODIFY:
16a3246f 5025 asm_fprintf (f, "[%s], %wd", reg_names [REGNO (addr.base)],
43e9d192
IB
5026 INTVAL (addr.offset));
5027 return;
5028 default:
5029 break;
5030 }
5031 break;
5032
5033 case ADDRESS_LO_SUM:
16a3246f 5034 asm_fprintf (f, "[%s, #:lo12:", reg_names [REGNO (addr.base)]);
43e9d192
IB
5035 output_addr_const (f, addr.offset);
5036 asm_fprintf (f, "]");
5037 return;
5038
5039 case ADDRESS_SYMBOLIC:
5040 break;
5041 }
5042
5043 output_addr_const (f, x);
5044}
5045
43e9d192
IB
5046bool
5047aarch64_label_mentioned_p (rtx x)
5048{
5049 const char *fmt;
5050 int i;
5051
5052 if (GET_CODE (x) == LABEL_REF)
5053 return true;
5054
5055 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
5056 referencing instruction, but they are constant offsets, not
5057 symbols. */
5058 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5059 return false;
5060
5061 fmt = GET_RTX_FORMAT (GET_CODE (x));
5062 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5063 {
5064 if (fmt[i] == 'E')
5065 {
5066 int j;
5067
5068 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5069 if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
5070 return 1;
5071 }
5072 else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
5073 return 1;
5074 }
5075
5076 return 0;
5077}
5078
5079/* Implement REGNO_REG_CLASS. */
5080
5081enum reg_class
5082aarch64_regno_regclass (unsigned regno)
5083{
5084 if (GP_REGNUM_P (regno))
a4a182c6 5085 return GENERAL_REGS;
43e9d192
IB
5086
5087 if (regno == SP_REGNUM)
5088 return STACK_REG;
5089
5090 if (regno == FRAME_POINTER_REGNUM
5091 || regno == ARG_POINTER_REGNUM)
f24bb080 5092 return POINTER_REGS;
43e9d192
IB
5093
5094 if (FP_REGNUM_P (regno))
5095 return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
5096
5097 return NO_REGS;
5098}
5099
0c4ec427 5100static rtx
ef4bddc2 5101aarch64_legitimize_address (rtx x, rtx /* orig_x */, machine_mode mode)
0c4ec427
RE
5102{
5103 /* Try to split X+CONST into Y=X+(CONST & ~mask), Y+(CONST&mask),
5104 where mask is selected by alignment and size of the offset.
5105 We try to pick as large a range for the offset as possible to
5106 maximize the chance of a CSE. However, for aligned addresses
5107 we limit the range to 4k so that structures with different sized
e8426e0a
BC
5108 elements are likely to use the same base. We need to be careful
5109 not to split a CONST for some forms of address expression, otherwise
5110 it will generate sub-optimal code. */
0c4ec427
RE
5111
5112 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
5113 {
9e0218fc 5114 rtx base = XEXP (x, 0);
17d7bdd8 5115 rtx offset_rtx = XEXP (x, 1);
9e0218fc 5116 HOST_WIDE_INT offset = INTVAL (offset_rtx);
0c4ec427 5117
9e0218fc 5118 if (GET_CODE (base) == PLUS)
e8426e0a 5119 {
9e0218fc
RH
5120 rtx op0 = XEXP (base, 0);
5121 rtx op1 = XEXP (base, 1);
5122
5123 /* Force any scaling into a temp for CSE. */
5124 op0 = force_reg (Pmode, op0);
5125 op1 = force_reg (Pmode, op1);
5126
5127 /* Let the pointer register be in op0. */
5128 if (REG_POINTER (op1))
5129 std::swap (op0, op1);
5130
5131 /* If the pointer is virtual or frame related, then we know that
5132 virtual register instantiation or register elimination is going
5133 to apply a second constant. We want the two constants folded
5134 together easily. Therefore, emit as (OP0 + CONST) + OP1. */
5135 if (virt_or_elim_regno_p (REGNO (op0)))
e8426e0a 5136 {
9e0218fc
RH
5137 base = expand_binop (Pmode, add_optab, op0, offset_rtx,
5138 NULL_RTX, true, OPTAB_DIRECT);
5139 return gen_rtx_PLUS (Pmode, base, op1);
e8426e0a 5140 }
e8426e0a 5141
9e0218fc
RH
5142 /* Otherwise, in order to encourage CSE (and thus loop strength
5143 reduction) of scaled addresses, emit as (OP0 + OP1) + CONST. */
5144 base = expand_binop (Pmode, add_optab, op0, op1,
5145 NULL_RTX, true, OPTAB_DIRECT);
5146 x = gen_rtx_PLUS (Pmode, base, offset_rtx);
e8426e0a
BC
5147 }
5148
0c4ec427 5149 /* Does it look like we'll need a load/store-pair operation? */
9e0218fc 5150 HOST_WIDE_INT base_offset;
0c4ec427
RE
5151 if (GET_MODE_SIZE (mode) > 16
5152 || mode == TImode)
5153 base_offset = ((offset + 64 * GET_MODE_SIZE (mode))
5154 & ~((128 * GET_MODE_SIZE (mode)) - 1));
5155 /* For offsets that aren't a multiple of the access size, the limit is
5156 -256...255. */
5157 else if (offset & (GET_MODE_SIZE (mode) - 1))
ff0f3f1c
WD
5158 {
5159 base_offset = (offset + 0x100) & ~0x1ff;
5160
5161 /* BLKmode typically uses LDP of X-registers. */
5162 if (mode == BLKmode)
5163 base_offset = (offset + 512) & ~0x3ff;
5164 }
5165 /* Small negative offsets are supported. */
5166 else if (IN_RANGE (offset, -256, 0))
5167 base_offset = 0;
5168 /* Use a 12-bit offset scaled by the access size. */
0c4ec427 5169 else
ff0f3f1c 5170 base_offset = offset & (~0xfff * GET_MODE_SIZE (mode));
0c4ec427 5171
9e0218fc
RH
5172 if (base_offset != 0)
5173 {
5174 base = plus_constant (Pmode, base, base_offset);
5175 base = force_operand (base, NULL_RTX);
5176 return plus_constant (Pmode, base, offset - base_offset);
5177 }
0c4ec427
RE
5178 }
5179
5180 return x;
5181}
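/* Worked example for the function above: for a DImode access to x1 + 0x12340
   the offset is a multiple of the access size, so base_offset becomes
   0x12340 & ~0x7fff == 0x10000.  The base x1 + 0x10000 is forced into a
   temporary register and the data is addressed at [tmp, 0x2340]; other
   accesses whose offsets share the same high bits can then CSE the same
   base register.  */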
5182
b4f50fd4
RR
5183/* Return the reload icode required for a constant pool entry in mode MODE. */
5184static enum insn_code
5185aarch64_constant_pool_reload_icode (machine_mode mode)
5186{
5187 switch (mode)
5188 {
5189 case SFmode:
5190 return CODE_FOR_aarch64_reload_movcpsfdi;
5191
5192 case DFmode:
5193 return CODE_FOR_aarch64_reload_movcpdfdi;
5194
5195 case TFmode:
5196 return CODE_FOR_aarch64_reload_movcptfdi;
5197
5198 case V8QImode:
5199 return CODE_FOR_aarch64_reload_movcpv8qidi;
5200
5201 case V16QImode:
5202 return CODE_FOR_aarch64_reload_movcpv16qidi;
5203
5204 case V4HImode:
5205 return CODE_FOR_aarch64_reload_movcpv4hidi;
5206
5207 case V8HImode:
5208 return CODE_FOR_aarch64_reload_movcpv8hidi;
5209
5210 case V2SImode:
5211 return CODE_FOR_aarch64_reload_movcpv2sidi;
5212
5213 case V4SImode:
5214 return CODE_FOR_aarch64_reload_movcpv4sidi;
5215
5216 case V2DImode:
5217 return CODE_FOR_aarch64_reload_movcpv2didi;
5218
5219 case V2DFmode:
5220 return CODE_FOR_aarch64_reload_movcpv2dfdi;
5221
5222 default:
5223 gcc_unreachable ();
5224 }
5225
5226 gcc_unreachable ();
5227}
5228static reg_class_t
5229aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
5230 reg_class_t rclass,
ef4bddc2 5231 machine_mode mode,
5232 secondary_reload_info *sri)
5233{
5234
5235 /* If we have to disable direct literal pool loads and stores because the
5236 function is too big, then we need a scratch register. */
5237 if (MEM_P (x) && GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x)
5238 && (SCALAR_FLOAT_MODE_P (GET_MODE (x))
5239 || targetm.vector_mode_supported_p (GET_MODE (x)))
9ee6540a 5240 && !aarch64_pcrelative_literal_loads)
5241 {
5242 sri->icode = aarch64_constant_pool_reload_icode (mode);
5243 return NO_REGS;
5244 }
5245
5246 /* Without the TARGET_SIMD instructions we cannot move a Q register
5247 to a Q register directly. We need a scratch. */
5248 if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
5249 && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
5250 && reg_class_subset_p (rclass, FP_REGS))
5251 {
5252 if (mode == TFmode)
5253 sri->icode = CODE_FOR_aarch64_reload_movtf;
5254 else if (mode == TImode)
5255 sri->icode = CODE_FOR_aarch64_reload_movti;
5256 return NO_REGS;
5257 }
5258
 5259 /* A TFmode or TImode memory access should be handled via FP_REGS
 5260 because AArch64 has richer addressing modes for LDR/STR instructions
 5261 than for LDP/STP instructions. */
d5726973 5262 if (TARGET_FLOAT && rclass == GENERAL_REGS
5263 && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
5264 return FP_REGS;
5265
5266 if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
a4a182c6 5267 return GENERAL_REGS;
5268
5269 return NO_REGS;
5270}
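/* A concrete instance of one case above that needs a scratch register
   (a sketch, not an exhaustive list): without TARGET_SIMD a TImode value
   cannot be copied directly between two FP registers, so the code above
   sets sri->icode to CODE_FOR_aarch64_reload_movti and returns NO_REGS,
   letting reload supply the scratch register the expansion needs.  */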
5271
5272static bool
5273aarch64_can_eliminate (const int from, const int to)
5274{
5275 /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
5276 HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
5277
5278 if (frame_pointer_needed)
5279 {
5280 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5281 return true;
5282 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
5283 return false;
5284 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
5285 && !cfun->calls_alloca)
5286 return true;
5287 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5288 return true;
5289
5290 return false;
43e9d192 5291 }
5292 else
5293 {
5294 /* If we decided that we didn't need a leaf frame pointer but then used
5295 LR in the function, then we'll want a frame pointer after all, so
5296 prevent this elimination to ensure a frame pointer is used. */
5297 if (to == STACK_POINTER_REGNUM
5298 && flag_omit_leaf_frame_pointer
5299 && df_regs_ever_live_p (LR_REGNUM))
5300 return false;
5301 }
777e6976 5302
5303 return true;
5304}
5305
5306HOST_WIDE_INT
5307aarch64_initial_elimination_offset (unsigned from, unsigned to)
5308{
43e9d192 5309 aarch64_layout_frame ();
5310
5311 if (to == HARD_FRAME_POINTER_REGNUM)
5312 {
5313 if (from == ARG_POINTER_REGNUM)
71bfb77a 5314 return cfun->machine->frame.hard_fp_offset;
5315
5316 if (from == FRAME_POINTER_REGNUM)
5317 return cfun->machine->frame.hard_fp_offset
5318 - cfun->machine->frame.locals_offset;
5319 }
5320
5321 if (to == STACK_POINTER_REGNUM)
5322 {
5323 if (from == FRAME_POINTER_REGNUM)
5324 return cfun->machine->frame.frame_size
5325 - cfun->machine->frame.locals_offset;
5326 }
5327
1c960e02 5328 return cfun->machine->frame.frame_size;
5329}
5330
5331/* Implement RETURN_ADDR_RTX. We do not support moving back to a
5332 previous frame. */
5333
5334rtx
5335aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5336{
5337 if (count != 0)
5338 return const0_rtx;
5339 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
5340}
5341
5342
5343static void
5344aarch64_asm_trampoline_template (FILE *f)
5345{
5346 if (TARGET_ILP32)
5347 {
5348 asm_fprintf (f, "\tldr\tw%d, .+16\n", IP1_REGNUM - R0_REGNUM);
5349 asm_fprintf (f, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM - R0_REGNUM);
5350 }
5351 else
5352 {
5353 asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]);
5354 asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]);
5355 }
01a3a324 5356 asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
43e9d192 5357 assemble_aligned_integer (4, const0_rtx);
5358 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
5359 assemble_aligned_integer (POINTER_BYTES, const0_rtx);
5360}
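/* For the LP64 case the template above expands to something like the
   following (a sketch; the exact register names follow IP1_REGNUM and
   STATIC_CHAIN_REGNUM, here assumed to be x17 and x18):

     ldr    x17, .+16    // load the target function address
     ldr    x18, .+20    // load the static chain value
     br     x17
     .word  0            // pad the code out to 16 bytes
     .xword 0            // slot for the function address
     .xword 0            // slot for the static chain

   aarch64_trampoline_init below copies this template and then writes the
   real function address and chain value into the two trailing slots.  */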
5361
5362static void
5363aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5364{
5365 rtx fnaddr, mem, a_tramp;
28514dda 5366 const int tramp_code_sz = 16;
5367
5368 /* Don't need to copy the trailing D-words, we fill those in below. */
5369 emit_block_move (m_tramp, assemble_trampoline_template (),
5370 GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
5371 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
43e9d192 5372 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5373 if (GET_MODE (fnaddr) != ptr_mode)
5374 fnaddr = convert_memory_address (ptr_mode, fnaddr);
5375 emit_move_insn (mem, fnaddr);
5376
28514dda 5377 mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz + POINTER_BYTES);
5378 emit_move_insn (mem, chain_value);
5379
5380 /* XXX We should really define a "clear_cache" pattern and use
5381 gen_clear_cache(). */
5382 a_tramp = XEXP (m_tramp, 0);
5383 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
5384 LCT_NORMAL, VOIDmode, 2, a_tramp, ptr_mode,
5385 plus_constant (ptr_mode, a_tramp, TRAMPOLINE_SIZE),
5386 ptr_mode);
5387}
5388
5389static unsigned char
ef4bddc2 5390aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
5391{
5392 switch (regclass)
5393 {
fee9ba42 5394 case CALLER_SAVE_REGS:
5395 case POINTER_REGS:
5396 case GENERAL_REGS:
5397 case ALL_REGS:
5398 case FP_REGS:
5399 case FP_LO_REGS:
5400 return
5401 aarch64_vector_mode_p (mode)
5402 ? (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG
5403 : (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5404 case STACK_REG:
5405 return 1;
5406
5407 case NO_REGS:
5408 return 0;
5409
5410 default:
5411 break;
5412 }
5413 gcc_unreachable ();
5414}
5415
5416static reg_class_t
78d8b9f0 5417aarch64_preferred_reload_class (rtx x, reg_class_t regclass)
43e9d192 5418{
51bb310d 5419 if (regclass == POINTER_REGS)
5420 return GENERAL_REGS;
5421
5422 if (regclass == STACK_REG)
5423 {
5424 if (REG_P(x)
5425 && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x)), POINTER_REGS))
5426 return regclass;
5427
5428 return NO_REGS;
5429 }
5430
5431 /* If it's an integer immediate that MOVI can't handle, then
5432 FP_REGS is not an option, so we return NO_REGS instead. */
5433 if (CONST_INT_P (x) && reg_class_subset_p (regclass, FP_REGS)
5434 && !aarch64_simd_imm_scalar_p (x, GET_MODE (x)))
5435 return NO_REGS;
5436
 5437 /* Register elimination can result in a request for
 5438 SP+constant->FP_REGS. We cannot support such operations, which
 5439 use SP as the source and an FP_REG as the destination, so reject
 5440 them outright. */
5441 if (! reg_class_subset_p (regclass, GENERAL_REGS) && GET_CODE (x) == PLUS)
5442 {
5443 rtx lhs = XEXP (x, 0);
5444
5445 /* Look through a possible SUBREG introduced by ILP32. */
5446 if (GET_CODE (lhs) == SUBREG)
5447 lhs = SUBREG_REG (lhs);
5448
5449 gcc_assert (REG_P (lhs));
5450 gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs)),
5451 POINTER_REGS));
5452 return NO_REGS;
5453 }
5454
78d8b9f0 5455 return regclass;
5456}
5457
5458void
5459aarch64_asm_output_labelref (FILE* f, const char *name)
5460{
5461 asm_fprintf (f, "%U%s", name);
5462}
5463
5464static void
5465aarch64_elf_asm_constructor (rtx symbol, int priority)
5466{
5467 if (priority == DEFAULT_INIT_PRIORITY)
5468 default_ctor_section_asm_out_constructor (symbol, priority);
5469 else
5470 {
5471 section *s;
5472 char buf[18];
5473 snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
5474 s = get_section (buf, SECTION_WRITE, NULL);
5475 switch_to_section (s);
5476 assemble_align (POINTER_SIZE);
28514dda 5477 assemble_aligned_integer (POINTER_BYTES, symbol);
43e9d192
IB
5478 }
5479}
5480
5481static void
5482aarch64_elf_asm_destructor (rtx symbol, int priority)
5483{
5484 if (priority == DEFAULT_INIT_PRIORITY)
5485 default_dtor_section_asm_out_destructor (symbol, priority);
5486 else
5487 {
5488 section *s;
5489 char buf[18];
5490 snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
5491 s = get_section (buf, SECTION_WRITE, NULL);
5492 switch_to_section (s);
5493 assemble_align (POINTER_SIZE);
28514dda 5494 assemble_aligned_integer (POINTER_BYTES, symbol);
43e9d192
IB
5495 }
5496}
5497
5498const char*
5499aarch64_output_casesi (rtx *operands)
5500{
5501 char buf[100];
5502 char label[100];
b32d5189 5503 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
43e9d192
IB
5504 int index;
5505 static const char *const patterns[4][2] =
5506 {
5507 {
5508 "ldrb\t%w3, [%0,%w1,uxtw]",
5509 "add\t%3, %4, %w3, sxtb #2"
5510 },
5511 {
5512 "ldrh\t%w3, [%0,%w1,uxtw #1]",
5513 "add\t%3, %4, %w3, sxth #2"
5514 },
5515 {
5516 "ldr\t%w3, [%0,%w1,uxtw #2]",
5517 "add\t%3, %4, %w3, sxtw #2"
5518 },
5519 /* We assume that DImode is only generated when not optimizing and
5520 that we don't really need 64-bit address offsets. That would
5521 imply an object file with 8GB of code in a single function! */
5522 {
5523 "ldr\t%w3, [%0,%w1,uxtw #2]",
5524 "add\t%3, %4, %w3, sxtw #2"
5525 }
5526 };
5527
5528 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
5529
5530 index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
5531
5532 gcc_assert (index >= 0 && index <= 3);
5533
 5534 /* Need to implement table size reduction, by changing the code below. */
5535 output_asm_insn (patterns[index][0], operands);
5536 ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
5537 snprintf (buf, sizeof (buf),
5538 "adr\t%%4, %s", targetm.strip_name_encoding (label));
5539 output_asm_insn (buf, operands);
5540 output_asm_insn (patterns[index][1], operands);
5541 output_asm_insn ("br\t%3", operands);
5542 assemble_label (asm_out_file, label);
5543 return "";
5544}
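/* For example, with a HImode dispatch table the code above emits the
   operand placeholders of patterns[1] around the generated label, i.e.
   roughly:

     ldrh  %w3, [%0, %w1, uxtw #1]   // load the scaled table entry
     adr   %4, .LrtxN                // base label emitted after the BR
     add   %3, %4, %w3, sxth #2      // rescale the pc-relative entry
     br    %3
   .LrtxN:

   where %0..%4 are the casesi operands and .LrtxN is the internal label
   generated via ASM_GENERATE_INTERNAL_LABEL.  */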
5545
5546
5547/* Return size in bits of an arithmetic operand which is shifted/scaled and
5548 masked such that it is suitable for a UXTB, UXTH, or UXTW extend
5549 operator. */
5550
5551int
5552aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
5553{
5554 if (shift >= 0 && shift <= 3)
5555 {
5556 int size;
5557 for (size = 8; size <= 32; size *= 2)
5558 {
5559 HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
5560 if (mask == bits << shift)
5561 return size;
5562 }
5563 }
5564 return 0;
5565}
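/* For example, aarch64_uxt_size (1, 0x1fe) returns 8 because
   0xff << 1 == 0x1fe, i.e. the operation behaves as a UXTB combined with
   a left shift by one; aarch64_uxt_size (0, 0xffffffff) returns 32
   (UXTW); any mask that is not a shifted 0xff/0xffff/0xffffffff makes
   the function return 0.  */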
5566
 5567/* Constant pools are per function only when PC-relative
 5568 literal loads are enabled or we are using the large memory
 5569 model. */
5570
5571static inline bool
5572aarch64_can_use_per_function_literal_pools_p (void)
5573{
9ee6540a 5574 return (aarch64_pcrelative_literal_loads
5575 || aarch64_cmodel == AARCH64_CMODEL_LARGE);
5576}
5577
43e9d192 5578static bool
e78d485e 5579aarch64_use_blocks_for_constant_p (machine_mode, const_rtx)
43e9d192 5580{
 5581 /* FIXME: In an ideal world this would work similarly
 5582 to the logic in aarch64_select_rtx_section, but this
 5583 breaks bootstrap in gccgo. For now we work around
 5584 this by returning false here. */
5585 return false;
43e9d192
IB
5586}
5587
e78d485e
RR
5588/* Select appropriate section for constants depending
5589 on where we place literal pools. */
5590
43e9d192 5591static section *
e78d485e
RR
5592aarch64_select_rtx_section (machine_mode mode,
5593 rtx x,
5594 unsigned HOST_WIDE_INT align)
43e9d192 5595{
e78d485e
RR
5596 if (aarch64_can_use_per_function_literal_pools_p ())
5597 return function_section (current_function_decl);
43e9d192 5598
e78d485e
RR
5599 return default_elf_select_rtx_section (mode, x, align);
5600}
43e9d192 5601
5fca7b66
RH
5602/* Implement ASM_OUTPUT_POOL_EPILOGUE. */
5603void
5604aarch64_asm_output_pool_epilogue (FILE *f, const char *, tree,
5605 HOST_WIDE_INT offset)
5606{
5607 /* When using per-function literal pools, we must ensure that any code
5608 section is aligned to the minimal instruction length, lest we get
5609 errors from the assembler re "unaligned instructions". */
5610 if ((offset & 3) && aarch64_can_use_per_function_literal_pools_p ())
5611 ASM_OUTPUT_ALIGN (f, 2);
5612}
5613
43e9d192
IB
5614/* Costs. */
5615
5616/* Helper function for rtx cost calculation. Strip a shift expression
5617 from X. Returns the inner operand if successful, or the original
5618 expression on failure. */
5619static rtx
5620aarch64_strip_shift (rtx x)
5621{
5622 rtx op = x;
5623
57b77d46
RE
5624 /* We accept both ROTATERT and ROTATE: since the RHS must be a constant
5625 we can convert both to ROR during final output. */
43e9d192
IB
5626 if ((GET_CODE (op) == ASHIFT
5627 || GET_CODE (op) == ASHIFTRT
57b77d46
RE
5628 || GET_CODE (op) == LSHIFTRT
5629 || GET_CODE (op) == ROTATERT
5630 || GET_CODE (op) == ROTATE)
43e9d192
IB
5631 && CONST_INT_P (XEXP (op, 1)))
5632 return XEXP (op, 0);
5633
5634 if (GET_CODE (op) == MULT
5635 && CONST_INT_P (XEXP (op, 1))
5636 && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
5637 return XEXP (op, 0);
5638
5639 return x;
5640}
5641
4745e701 5642/* Helper function for rtx cost calculation. Strip an extend
43e9d192
IB
5643 expression from X. Returns the inner operand if successful, or the
5644 original expression on failure. We deal with a number of possible
5645 canonicalization variations here. */
5646static rtx
4745e701 5647aarch64_strip_extend (rtx x)
43e9d192
IB
5648{
5649 rtx op = x;
5650
5651 /* Zero and sign extraction of a widened value. */
5652 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
5653 && XEXP (op, 2) == const0_rtx
4745e701 5654 && GET_CODE (XEXP (op, 0)) == MULT
43e9d192
IB
5655 && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
5656 XEXP (op, 1)))
5657 return XEXP (XEXP (op, 0), 0);
5658
5659 /* It can also be represented (for zero-extend) as an AND with an
5660 immediate. */
5661 if (GET_CODE (op) == AND
5662 && GET_CODE (XEXP (op, 0)) == MULT
5663 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
5664 && CONST_INT_P (XEXP (op, 1))
5665 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
5666 INTVAL (XEXP (op, 1))) != 0)
5667 return XEXP (XEXP (op, 0), 0);
5668
5669 /* Now handle extended register, as this may also have an optional
5670 left shift by 1..4. */
5671 if (GET_CODE (op) == ASHIFT
5672 && CONST_INT_P (XEXP (op, 1))
5673 && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
5674 op = XEXP (op, 0);
5675
5676 if (GET_CODE (op) == ZERO_EXTEND
5677 || GET_CODE (op) == SIGN_EXTEND)
5678 op = XEXP (op, 0);
5679
5680 if (op != x)
5681 return op;
5682
4745e701
JG
5683 return x;
5684}
5685
0a78ebe4
KT
5686/* Return true iff CODE is a shift supported in combination
5687 with arithmetic instructions. */
4d1919ed 5688
0a78ebe4
KT
5689static bool
5690aarch64_shift_p (enum rtx_code code)
5691{
5692 return code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT;
5693}
5694
4745e701 5695/* Helper function for rtx cost calculation. Calculate the cost of
0a78ebe4
KT
5696 a MULT or ASHIFT, which may be part of a compound PLUS/MINUS rtx.
 5697 Return the calculated cost of the expression, recursing manually into
4745e701
JG
5698 operands where needed. */
5699
5700static int
e548c9df 5701aarch64_rtx_mult_cost (rtx x, enum rtx_code code, int outer, bool speed)
4745e701
JG
5702{
5703 rtx op0, op1;
5704 const struct cpu_cost_table *extra_cost
b175b679 5705 = aarch64_tune_params.insn_extra_cost;
4745e701 5706 int cost = 0;
0a78ebe4 5707 bool compound_p = (outer == PLUS || outer == MINUS);
ef4bddc2 5708 machine_mode mode = GET_MODE (x);
4745e701
JG
5709
5710 gcc_checking_assert (code == MULT);
5711
5712 op0 = XEXP (x, 0);
5713 op1 = XEXP (x, 1);
5714
5715 if (VECTOR_MODE_P (mode))
5716 mode = GET_MODE_INNER (mode);
5717
5718 /* Integer multiply/fma. */
5719 if (GET_MODE_CLASS (mode) == MODE_INT)
5720 {
5721 /* The multiply will be canonicalized as a shift, cost it as such. */
0a78ebe4
KT
5722 if (aarch64_shift_p (GET_CODE (x))
5723 || (CONST_INT_P (op1)
5724 && exact_log2 (INTVAL (op1)) > 0))
4745e701 5725 {
0a78ebe4
KT
5726 bool is_extend = GET_CODE (op0) == ZERO_EXTEND
5727 || GET_CODE (op0) == SIGN_EXTEND;
4745e701
JG
5728 if (speed)
5729 {
0a78ebe4
KT
5730 if (compound_p)
5731 {
5732 if (REG_P (op1))
5733 /* ARITH + shift-by-register. */
5734 cost += extra_cost->alu.arith_shift_reg;
5735 else if (is_extend)
5736 /* ARITH + extended register. We don't have a cost field
5737 for ARITH+EXTEND+SHIFT, so use extend_arith here. */
5738 cost += extra_cost->alu.extend_arith;
5739 else
5740 /* ARITH + shift-by-immediate. */
5741 cost += extra_cost->alu.arith_shift;
5742 }
4745e701
JG
5743 else
5744 /* LSL (immediate). */
0a78ebe4
KT
5745 cost += extra_cost->alu.shift;
5746
4745e701 5747 }
0a78ebe4
KT
5748 /* Strip extends as we will have costed them in the case above. */
5749 if (is_extend)
5750 op0 = aarch64_strip_extend (op0);
4745e701 5751
e548c9df 5752 cost += rtx_cost (op0, VOIDmode, code, 0, speed);
4745e701
JG
5753
5754 return cost;
5755 }
5756
d2ac256b
KT
5757 /* MNEG or [US]MNEGL. Extract the NEG operand and indicate that it's a
5758 compound and let the below cases handle it. After all, MNEG is a
5759 special-case alias of MSUB. */
5760 if (GET_CODE (op0) == NEG)
5761 {
5762 op0 = XEXP (op0, 0);
5763 compound_p = true;
5764 }
5765
4745e701
JG
5766 /* Integer multiplies or FMAs have zero/sign extending variants. */
5767 if ((GET_CODE (op0) == ZERO_EXTEND
5768 && GET_CODE (op1) == ZERO_EXTEND)
5769 || (GET_CODE (op0) == SIGN_EXTEND
5770 && GET_CODE (op1) == SIGN_EXTEND))
5771 {
e548c9df
AM
5772 cost += rtx_cost (XEXP (op0, 0), VOIDmode, MULT, 0, speed);
5773 cost += rtx_cost (XEXP (op1, 0), VOIDmode, MULT, 1, speed);
4745e701
JG
5774
5775 if (speed)
5776 {
0a78ebe4 5777 if (compound_p)
d2ac256b 5778 /* SMADDL/UMADDL/UMSUBL/SMSUBL. */
4745e701
JG
5779 cost += extra_cost->mult[0].extend_add;
5780 else
5781 /* MUL/SMULL/UMULL. */
5782 cost += extra_cost->mult[0].extend;
5783 }
5784
5785 return cost;
5786 }
5787
d2ac256b 5788 /* This is either an integer multiply or a MADD. In both cases
4745e701 5789 we want to recurse and cost the operands. */
e548c9df
AM
5790 cost += rtx_cost (op0, mode, MULT, 0, speed);
5791 cost += rtx_cost (op1, mode, MULT, 1, speed);
4745e701
JG
5792
5793 if (speed)
5794 {
0a78ebe4 5795 if (compound_p)
d2ac256b 5796 /* MADD/MSUB. */
4745e701
JG
5797 cost += extra_cost->mult[mode == DImode].add;
5798 else
5799 /* MUL. */
5800 cost += extra_cost->mult[mode == DImode].simple;
5801 }
5802
5803 return cost;
5804 }
5805 else
5806 {
5807 if (speed)
5808 {
3d840f7d 5809 /* Floating-point FMA/FMUL can also support negations of the
d318517d
SN
5810 operands, unless the rounding mode is upward or downward in
5811 which case FNMUL is different than FMUL with operand negation. */
5812 bool neg0 = GET_CODE (op0) == NEG;
5813 bool neg1 = GET_CODE (op1) == NEG;
5814 if (compound_p || !flag_rounding_math || (neg0 && neg1))
5815 {
5816 if (neg0)
5817 op0 = XEXP (op0, 0);
5818 if (neg1)
5819 op1 = XEXP (op1, 0);
5820 }
4745e701 5821
0a78ebe4 5822 if (compound_p)
4745e701
JG
5823 /* FMADD/FNMADD/FNMSUB/FMSUB. */
5824 cost += extra_cost->fp[mode == DFmode].fma;
5825 else
3d840f7d 5826 /* FMUL/FNMUL. */
4745e701
JG
5827 cost += extra_cost->fp[mode == DFmode].mult;
5828 }
5829
e548c9df
AM
5830 cost += rtx_cost (op0, mode, MULT, 0, speed);
5831 cost += rtx_cost (op1, mode, MULT, 1, speed);
4745e701
JG
5832 return cost;
5833 }
43e9d192
IB
5834}
5835
67747367
JG
5836static int
5837aarch64_address_cost (rtx x,
ef4bddc2 5838 machine_mode mode,
67747367
JG
5839 addr_space_t as ATTRIBUTE_UNUSED,
5840 bool speed)
5841{
5842 enum rtx_code c = GET_CODE (x);
b175b679 5843 const struct cpu_addrcost_table *addr_cost = aarch64_tune_params.addr_cost;
67747367
JG
5844 struct aarch64_address_info info;
5845 int cost = 0;
5846 info.shift = 0;
5847
5848 if (!aarch64_classify_address (&info, x, mode, c, false))
5849 {
5850 if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
5851 {
5852 /* This is a CONST or SYMBOL ref which will be split
5853 in a different way depending on the code model in use.
5854 Cost it through the generic infrastructure. */
e548c9df 5855 int cost_symbol_ref = rtx_cost (x, Pmode, MEM, 1, speed);
67747367
JG
5856 /* Divide through by the cost of one instruction to
5857 bring it to the same units as the address costs. */
5858 cost_symbol_ref /= COSTS_N_INSNS (1);
5859 /* The cost is then the cost of preparing the address,
5860 followed by an immediate (possibly 0) offset. */
5861 return cost_symbol_ref + addr_cost->imm_offset;
5862 }
5863 else
5864 {
5865 /* This is most likely a jump table from a case
5866 statement. */
5867 return addr_cost->register_offset;
5868 }
5869 }
5870
5871 switch (info.type)
5872 {
5873 case ADDRESS_LO_SUM:
5874 case ADDRESS_SYMBOLIC:
5875 case ADDRESS_REG_IMM:
5876 cost += addr_cost->imm_offset;
5877 break;
5878
5879 case ADDRESS_REG_WB:
5880 if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
5881 cost += addr_cost->pre_modify;
5882 else if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
5883 cost += addr_cost->post_modify;
5884 else
5885 gcc_unreachable ();
5886
5887 break;
5888
5889 case ADDRESS_REG_REG:
5890 cost += addr_cost->register_offset;
5891 break;
5892
67747367 5893 case ADDRESS_REG_SXTW:
783879e6
EM
5894 cost += addr_cost->register_sextend;
5895 break;
5896
5897 case ADDRESS_REG_UXTW:
5898 cost += addr_cost->register_zextend;
67747367
JG
5899 break;
5900
5901 default:
5902 gcc_unreachable ();
5903 }
5904
5905
5906 if (info.shift > 0)
5907 {
5908 /* For the sake of calculating the cost of the shifted register
5909 component, we can treat same sized modes in the same way. */
5910 switch (GET_MODE_BITSIZE (mode))
5911 {
5912 case 16:
5913 cost += addr_cost->addr_scale_costs.hi;
5914 break;
5915
5916 case 32:
5917 cost += addr_cost->addr_scale_costs.si;
5918 break;
5919
5920 case 64:
5921 cost += addr_cost->addr_scale_costs.di;
5922 break;
5923
5924 /* We can't tell, or this is a 128-bit vector. */
5925 default:
5926 cost += addr_cost->addr_scale_costs.ti;
5927 break;
5928 }
5929 }
5930
5931 return cost;
5932}
5933
b9066f5a
MW
5934/* Return the cost of a branch. If SPEED_P is true then the compiler is
5935 optimizing for speed. If PREDICTABLE_P is true then the branch is predicted
5936 to be taken. */
5937
5938int
5939aarch64_branch_cost (bool speed_p, bool predictable_p)
5940{
5941 /* When optimizing for speed, use the cost of unpredictable branches. */
5942 const struct cpu_branch_cost *branch_costs =
b175b679 5943 aarch64_tune_params.branch_costs;
b9066f5a
MW
5944
5945 if (!speed_p || predictable_p)
5946 return branch_costs->predictable;
5947 else
5948 return branch_costs->unpredictable;
5949}
5950
7cc2145f
JG
5951/* Return true if the RTX X in mode MODE is a zero or sign extract
5952 usable in an ADD or SUB (extended register) instruction. */
5953static bool
ef4bddc2 5954aarch64_rtx_arith_op_extract_p (rtx x, machine_mode mode)
7cc2145f
JG
5955{
5956 /* Catch add with a sign extract.
5957 This is add_<optab><mode>_multp2. */
5958 if (GET_CODE (x) == SIGN_EXTRACT
5959 || GET_CODE (x) == ZERO_EXTRACT)
5960 {
5961 rtx op0 = XEXP (x, 0);
5962 rtx op1 = XEXP (x, 1);
5963 rtx op2 = XEXP (x, 2);
5964
5965 if (GET_CODE (op0) == MULT
5966 && CONST_INT_P (op1)
5967 && op2 == const0_rtx
5968 && CONST_INT_P (XEXP (op0, 1))
5969 && aarch64_is_extend_from_extract (mode,
5970 XEXP (op0, 1),
5971 op1))
5972 {
5973 return true;
5974 }
5975 }
e47c4031
KT
5976 /* The simple case <ARITH>, XD, XN, XM, [us]xt.
5977 No shift. */
5978 else if (GET_CODE (x) == SIGN_EXTEND
5979 || GET_CODE (x) == ZERO_EXTEND)
5980 return REG_P (XEXP (x, 0));
7cc2145f
JG
5981
5982 return false;
5983}
5984
61263118
KT
5985static bool
5986aarch64_frint_unspec_p (unsigned int u)
5987{
5988 switch (u)
5989 {
5990 case UNSPEC_FRINTZ:
5991 case UNSPEC_FRINTP:
5992 case UNSPEC_FRINTM:
5993 case UNSPEC_FRINTA:
5994 case UNSPEC_FRINTN:
5995 case UNSPEC_FRINTX:
5996 case UNSPEC_FRINTI:
5997 return true;
5998
5999 default:
6000 return false;
6001 }
6002}
6003
fb0cb7fa
KT
6004/* Return true iff X is an rtx that will match an extr instruction
6005 i.e. as described in the *extr<mode>5_insn family of patterns.
6006 OP0 and OP1 will be set to the operands of the shifts involved
6007 on success and will be NULL_RTX otherwise. */
6008
6009static bool
6010aarch64_extr_rtx_p (rtx x, rtx *res_op0, rtx *res_op1)
6011{
6012 rtx op0, op1;
6013 machine_mode mode = GET_MODE (x);
6014
6015 *res_op0 = NULL_RTX;
6016 *res_op1 = NULL_RTX;
6017
6018 if (GET_CODE (x) != IOR)
6019 return false;
6020
6021 op0 = XEXP (x, 0);
6022 op1 = XEXP (x, 1);
6023
6024 if ((GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT)
6025 || (GET_CODE (op1) == ASHIFT && GET_CODE (op0) == LSHIFTRT))
6026 {
6027 /* Canonicalise locally to ashift in op0, lshiftrt in op1. */
6028 if (GET_CODE (op1) == ASHIFT)
6029 std::swap (op0, op1);
6030
6031 if (!CONST_INT_P (XEXP (op0, 1)) || !CONST_INT_P (XEXP (op1, 1)))
6032 return false;
6033
6034 unsigned HOST_WIDE_INT shft_amnt_0 = UINTVAL (XEXP (op0, 1));
6035 unsigned HOST_WIDE_INT shft_amnt_1 = UINTVAL (XEXP (op1, 1));
6036
6037 if (shft_amnt_0 < GET_MODE_BITSIZE (mode)
6038 && shft_amnt_0 + shft_amnt_1 == GET_MODE_BITSIZE (mode))
6039 {
6040 *res_op0 = XEXP (op0, 0);
6041 *res_op1 = XEXP (op1, 0);
6042 return true;
6043 }
6044 }
6045
6046 return false;
6047}
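/* For example, with DImode operands

     (ior (ashift (reg X) (const_int 16))
          (lshiftrt (reg Y) (const_int 48)))

   the shift amounts satisfy 16 + 48 == 64, so we return true with
   *res_op0 = X and *res_op1 = Y; such an rtx is matched by the
   *extr<mode>5_insn patterns and becomes a single EXTR instruction.  */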
6048
2d5ffe46
AP
6049/* Calculate the cost of calculating (if_then_else (OP0) (OP1) (OP2)),
6050 storing it in *COST. Result is true if the total cost of the operation
6051 has now been calculated. */
6052static bool
6053aarch64_if_then_else_costs (rtx op0, rtx op1, rtx op2, int *cost, bool speed)
6054{
b9e3afe9
AP
6055 rtx inner;
6056 rtx comparator;
6057 enum rtx_code cmpcode;
6058
6059 if (COMPARISON_P (op0))
6060 {
6061 inner = XEXP (op0, 0);
6062 comparator = XEXP (op0, 1);
6063 cmpcode = GET_CODE (op0);
6064 }
6065 else
6066 {
6067 inner = op0;
6068 comparator = const0_rtx;
6069 cmpcode = NE;
6070 }
6071
2d5ffe46
AP
6072 if (GET_CODE (op1) == PC || GET_CODE (op2) == PC)
6073 {
6074 /* Conditional branch. */
b9e3afe9 6075 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_CC)
2d5ffe46
AP
6076 return true;
6077 else
6078 {
b9e3afe9 6079 if (cmpcode == NE || cmpcode == EQ)
2d5ffe46 6080 {
2d5ffe46
AP
6081 if (comparator == const0_rtx)
6082 {
6083 /* TBZ/TBNZ/CBZ/CBNZ. */
6084 if (GET_CODE (inner) == ZERO_EXTRACT)
6085 /* TBZ/TBNZ. */
e548c9df
AM
6086 *cost += rtx_cost (XEXP (inner, 0), VOIDmode,
6087 ZERO_EXTRACT, 0, speed);
6088 else
6089 /* CBZ/CBNZ. */
6090 *cost += rtx_cost (inner, VOIDmode, cmpcode, 0, speed);
2d5ffe46
AP
6091
6092 return true;
6093 }
6094 }
b9e3afe9 6095 else if (cmpcode == LT || cmpcode == GE)
2d5ffe46 6096 {
2d5ffe46
AP
6097 /* TBZ/TBNZ. */
6098 if (comparator == const0_rtx)
6099 return true;
6100 }
6101 }
6102 }
b9e3afe9 6103 else if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_CC)
2d5ffe46 6104 {
786298dc 6105 /* CCMP. */
6dfeb7ce 6106 if (GET_CODE (op1) == COMPARE)
786298dc
WD
6107 {
6108 /* Increase cost of CCMP reg, 0, imm, CC to prefer CMP reg, 0. */
6109 if (XEXP (op1, 1) == const0_rtx)
6110 *cost += 1;
6111 if (speed)
6112 {
6113 machine_mode mode = GET_MODE (XEXP (op1, 0));
6114 const struct cpu_cost_table *extra_cost
6115 = aarch64_tune_params.insn_extra_cost;
6116
6117 if (GET_MODE_CLASS (mode) == MODE_INT)
6118 *cost += extra_cost->alu.arith;
6119 else
6120 *cost += extra_cost->fp[mode == DFmode].compare;
6121 }
6122 return true;
6123 }
6124
2d5ffe46
AP
6125 /* It's a conditional operation based on the status flags,
6126 so it must be some flavor of CSEL. */
6127
6128 /* CSNEG, CSINV, and CSINC are handled for free as part of CSEL. */
6129 if (GET_CODE (op1) == NEG
6130 || GET_CODE (op1) == NOT
6131 || (GET_CODE (op1) == PLUS && XEXP (op1, 1) == const1_rtx))
6132 op1 = XEXP (op1, 0);
bad00732
KT
6133 else if (GET_CODE (op1) == ZERO_EXTEND && GET_CODE (op2) == ZERO_EXTEND)
6134 {
6135 /* CSEL with zero-extension (*cmovdi_insn_uxtw). */
6136 op1 = XEXP (op1, 0);
6137 op2 = XEXP (op2, 0);
6138 }
2d5ffe46 6139
e548c9df
AM
6140 *cost += rtx_cost (op1, VOIDmode, IF_THEN_ELSE, 1, speed);
6141 *cost += rtx_cost (op2, VOIDmode, IF_THEN_ELSE, 2, speed);
2d5ffe46
AP
6142 return true;
6143 }
6144
6145 /* We don't know what this is, cost all operands. */
6146 return false;
6147}
6148
283b6c85
KT
6149/* Check whether X is a bitfield operation of the form shift + extend that
6150 maps down to a UBFIZ/SBFIZ/UBFX/SBFX instruction. If so, return the
6151 operand to which the bitfield operation is applied. Otherwise return
6152 NULL_RTX. */
6153
6154static rtx
6155aarch64_extend_bitfield_pattern_p (rtx x)
6156{
6157 rtx_code outer_code = GET_CODE (x);
6158 machine_mode outer_mode = GET_MODE (x);
6159
6160 if (outer_code != ZERO_EXTEND && outer_code != SIGN_EXTEND
6161 && outer_mode != SImode && outer_mode != DImode)
6162 return NULL_RTX;
6163
6164 rtx inner = XEXP (x, 0);
6165 rtx_code inner_code = GET_CODE (inner);
6166 machine_mode inner_mode = GET_MODE (inner);
6167 rtx op = NULL_RTX;
6168
6169 switch (inner_code)
6170 {
6171 case ASHIFT:
6172 if (CONST_INT_P (XEXP (inner, 1))
6173 && (inner_mode == QImode || inner_mode == HImode))
6174 op = XEXP (inner, 0);
6175 break;
6176 case LSHIFTRT:
6177 if (outer_code == ZERO_EXTEND && CONST_INT_P (XEXP (inner, 1))
6178 && (inner_mode == QImode || inner_mode == HImode))
6179 op = XEXP (inner, 0);
6180 break;
6181 case ASHIFTRT:
6182 if (outer_code == SIGN_EXTEND && CONST_INT_P (XEXP (inner, 1))
6183 && (inner_mode == QImode || inner_mode == HImode))
6184 op = XEXP (inner, 0);
6185 break;
6186 default:
6187 break;
6188 }
6189
6190 return op;
6191}
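/* Two shapes accepted above, as an illustration (not exhaustive):

     (zero_extend:SI (lshiftrt:HI (reg:HI X) (const_int N)))   -> UBFX
     (sign_extend:DI (ashift:QI (reg:QI X) (const_int N)))     -> SBFIZ

   In both cases the inner (reg ...) is returned so that the caller can
   cost the whole expression as a single bitfield instruction.  */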
6192
8c83f71d
KT
6193/* Return true if the mask and a shift amount from an RTX of the form
6194 (x << SHFT_AMNT) & MASK are valid to combine into a UBFIZ instruction of
6195 mode MODE. See the *andim_ashift<mode>_bfiz pattern. */
6196
6197bool
6198aarch64_mask_and_shift_for_ubfiz_p (machine_mode mode, rtx mask, rtx shft_amnt)
6199{
6200 return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
6201 && INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
6202 && exact_log2 ((INTVAL (mask) >> INTVAL (shft_amnt)) + 1) >= 0
6203 && (INTVAL (mask) & ((1 << INTVAL (shft_amnt)) - 1)) == 0;
6204}
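/* For example, in SImode a mask of 0xff0 together with a shift amount of
   4 is accepted: (0xff0 >> 4) + 1 == 0x100 is a power of two and no mask
   bit falls below the shift, so (x << 4) & 0xff0 can be emitted as
   UBFIZ w0, w1, #4, #8 (register numbers purely illustrative).  */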
6205
43e9d192
IB
6206/* Calculate the cost of calculating X, storing it in *COST. Result
6207 is true if the total cost of the operation has now been calculated. */
6208static bool
e548c9df 6209aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
43e9d192
IB
6210 int param ATTRIBUTE_UNUSED, int *cost, bool speed)
6211{
a8eecd00 6212 rtx op0, op1, op2;
73250c4c 6213 const struct cpu_cost_table *extra_cost
b175b679 6214 = aarch64_tune_params.insn_extra_cost;
e548c9df 6215 int code = GET_CODE (x);
43e9d192 6216
7fc5ef02
JG
6217 /* By default, assume that everything has equivalent cost to the
6218 cheapest instruction. Any additional costs are applied as a delta
6219 above this default. */
6220 *cost = COSTS_N_INSNS (1);
6221
43e9d192
IB
6222 switch (code)
6223 {
6224 case SET:
ba123b0d
JG
6225 /* The cost depends entirely on the operands to SET. */
6226 *cost = 0;
43e9d192
IB
6227 op0 = SET_DEST (x);
6228 op1 = SET_SRC (x);
6229
6230 switch (GET_CODE (op0))
6231 {
6232 case MEM:
6233 if (speed)
2961177e
JG
6234 {
6235 rtx address = XEXP (op0, 0);
b6875aac
KV
6236 if (VECTOR_MODE_P (mode))
6237 *cost += extra_cost->ldst.storev;
6238 else if (GET_MODE_CLASS (mode) == MODE_INT)
2961177e
JG
6239 *cost += extra_cost->ldst.store;
6240 else if (mode == SFmode)
6241 *cost += extra_cost->ldst.storef;
6242 else if (mode == DFmode)
6243 *cost += extra_cost->ldst.stored;
6244
6245 *cost +=
6246 COSTS_N_INSNS (aarch64_address_cost (address, mode,
6247 0, speed));
6248 }
43e9d192 6249
e548c9df 6250 *cost += rtx_cost (op1, mode, SET, 1, speed);
43e9d192
IB
6251 return true;
6252
6253 case SUBREG:
6254 if (! REG_P (SUBREG_REG (op0)))
e548c9df 6255 *cost += rtx_cost (SUBREG_REG (op0), VOIDmode, SET, 0, speed);
ba123b0d 6256
43e9d192
IB
6257 /* Fall through. */
6258 case REG:
b6875aac
KV
6259 /* The cost is one per vector-register copied. */
6260 if (VECTOR_MODE_P (GET_MODE (op0)) && REG_P (op1))
6261 {
6262 int n_minus_1 = (GET_MODE_SIZE (GET_MODE (op0)) - 1)
6263 / GET_MODE_SIZE (V4SImode);
6264 *cost = COSTS_N_INSNS (n_minus_1 + 1);
6265 }
ba123b0d
JG
6266 /* const0_rtx is in general free, but we will use an
6267 instruction to set a register to 0. */
b6875aac
KV
6268 else if (REG_P (op1) || op1 == const0_rtx)
6269 {
6270 /* The cost is 1 per register copied. */
6271 int n_minus_1 = (GET_MODE_SIZE (GET_MODE (op0)) - 1)
ba123b0d 6272 / UNITS_PER_WORD;
b6875aac
KV
6273 *cost = COSTS_N_INSNS (n_minus_1 + 1);
6274 }
ba123b0d
JG
6275 else
6276 /* Cost is just the cost of the RHS of the set. */
e548c9df 6277 *cost += rtx_cost (op1, mode, SET, 1, speed);
43e9d192
IB
6278 return true;
6279
ba123b0d 6280 case ZERO_EXTRACT:
43e9d192 6281 case SIGN_EXTRACT:
ba123b0d
JG
6282 /* Bit-field insertion. Strip any redundant widening of
6283 the RHS to meet the width of the target. */
43e9d192
IB
6284 if (GET_CODE (op1) == SUBREG)
6285 op1 = SUBREG_REG (op1);
6286 if ((GET_CODE (op1) == ZERO_EXTEND
6287 || GET_CODE (op1) == SIGN_EXTEND)
4aa81c2e 6288 && CONST_INT_P (XEXP (op0, 1))
43e9d192
IB
6289 && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
6290 >= INTVAL (XEXP (op0, 1))))
6291 op1 = XEXP (op1, 0);
ba123b0d
JG
6292
6293 if (CONST_INT_P (op1))
6294 {
6295 /* MOV immediate is assumed to always be cheap. */
6296 *cost = COSTS_N_INSNS (1);
6297 }
6298 else
6299 {
6300 /* BFM. */
6301 if (speed)
6302 *cost += extra_cost->alu.bfi;
e548c9df 6303 *cost += rtx_cost (op1, VOIDmode, (enum rtx_code) code, 1, speed);
ba123b0d
JG
6304 }
6305
43e9d192
IB
6306 return true;
6307
6308 default:
ba123b0d
JG
6309 /* We can't make sense of this, assume default cost. */
6310 *cost = COSTS_N_INSNS (1);
61263118 6311 return false;
43e9d192
IB
6312 }
6313 return false;
6314
9dfc162c
JG
6315 case CONST_INT:
6316 /* If an instruction can incorporate a constant within the
6317 instruction, the instruction's expression avoids calling
6318 rtx_cost() on the constant. If rtx_cost() is called on a
6319 constant, then it is usually because the constant must be
6320 moved into a register by one or more instructions.
6321
6322 The exception is constant 0, which can be expressed
6323 as XZR/WZR and is therefore free. The exception to this is
6324 if we have (set (reg) (const0_rtx)) in which case we must cost
6325 the move. However, we can catch that when we cost the SET, so
6326 we don't need to consider that here. */
6327 if (x == const0_rtx)
6328 *cost = 0;
6329 else
6330 {
6331 /* To an approximation, building any other constant is
6332 proportionally expensive to the number of instructions
6333 required to build that constant. This is true whether we
6334 are compiling for SPEED or otherwise. */
82614948
RR
6335 *cost = COSTS_N_INSNS (aarch64_internal_mov_immediate
6336 (NULL_RTX, x, false, mode));
9dfc162c
JG
6337 }
6338 return true;
6339
6340 case CONST_DOUBLE:
6341 if (speed)
6342 {
6343 /* mov[df,sf]_aarch64. */
6344 if (aarch64_float_const_representable_p (x))
6345 /* FMOV (scalar immediate). */
6346 *cost += extra_cost->fp[mode == DFmode].fpconst;
6347 else if (!aarch64_float_const_zero_rtx_p (x))
6348 {
6349 /* This will be a load from memory. */
6350 if (mode == DFmode)
6351 *cost += extra_cost->ldst.loadd;
6352 else
6353 *cost += extra_cost->ldst.loadf;
6354 }
6355 else
6356 /* Otherwise this is +0.0. We get this using MOVI d0, #0
6357 or MOV v0.s[0], wzr - neither of which are modeled by the
6358 cost tables. Just use the default cost. */
6359 {
6360 }
6361 }
6362
6363 return true;
6364
43e9d192
IB
6365 case MEM:
6366 if (speed)
2961177e
JG
6367 {
6368 /* For loads we want the base cost of a load, plus an
6369 approximation for the additional cost of the addressing
6370 mode. */
6371 rtx address = XEXP (x, 0);
b6875aac
KV
6372 if (VECTOR_MODE_P (mode))
6373 *cost += extra_cost->ldst.loadv;
6374 else if (GET_MODE_CLASS (mode) == MODE_INT)
2961177e
JG
6375 *cost += extra_cost->ldst.load;
6376 else if (mode == SFmode)
6377 *cost += extra_cost->ldst.loadf;
6378 else if (mode == DFmode)
6379 *cost += extra_cost->ldst.loadd;
6380
6381 *cost +=
6382 COSTS_N_INSNS (aarch64_address_cost (address, mode,
6383 0, speed));
6384 }
43e9d192
IB
6385
6386 return true;
6387
6388 case NEG:
4745e701
JG
6389 op0 = XEXP (x, 0);
6390
b6875aac
KV
6391 if (VECTOR_MODE_P (mode))
6392 {
6393 if (speed)
6394 {
6395 /* FNEG. */
6396 *cost += extra_cost->vect.alu;
6397 }
6398 return false;
6399 }
6400
e548c9df
AM
6401 if (GET_MODE_CLASS (mode) == MODE_INT)
6402 {
4745e701
JG
6403 if (GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMPARE
6404 || GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMM_COMPARE)
6405 {
6406 /* CSETM. */
e548c9df 6407 *cost += rtx_cost (XEXP (op0, 0), VOIDmode, NEG, 0, speed);
4745e701
JG
6408 return true;
6409 }
6410
6411 /* Cost this as SUB wzr, X. */
e548c9df 6412 op0 = CONST0_RTX (mode);
4745e701
JG
6413 op1 = XEXP (x, 0);
6414 goto cost_minus;
6415 }
6416
e548c9df 6417 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4745e701
JG
6418 {
6419 /* Support (neg(fma...)) as a single instruction only if
6420 sign of zeros is unimportant. This matches the decision
6421 making in aarch64.md. */
6422 if (GET_CODE (op0) == FMA && !HONOR_SIGNED_ZEROS (GET_MODE (op0)))
6423 {
6424 /* FNMADD. */
e548c9df 6425 *cost = rtx_cost (op0, mode, NEG, 0, speed);
4745e701
JG
6426 return true;
6427 }
d318517d
SN
6428 if (GET_CODE (op0) == MULT)
6429 {
6430 /* FNMUL. */
6431 *cost = rtx_cost (op0, mode, NEG, 0, speed);
6432 return true;
6433 }
4745e701
JG
6434 if (speed)
6435 /* FNEG. */
6436 *cost += extra_cost->fp[mode == DFmode].neg;
6437 return false;
6438 }
6439
6440 return false;
43e9d192 6441
781aeb73
KT
6442 case CLRSB:
6443 case CLZ:
6444 if (speed)
b6875aac
KV
6445 {
6446 if (VECTOR_MODE_P (mode))
6447 *cost += extra_cost->vect.alu;
6448 else
6449 *cost += extra_cost->alu.clz;
6450 }
781aeb73
KT
6451
6452 return false;
6453
43e9d192
IB
6454 case COMPARE:
6455 op0 = XEXP (x, 0);
6456 op1 = XEXP (x, 1);
6457
6458 if (op1 == const0_rtx
6459 && GET_CODE (op0) == AND)
6460 {
6461 x = op0;
e548c9df 6462 mode = GET_MODE (op0);
43e9d192
IB
6463 goto cost_logic;
6464 }
6465
a8eecd00
JG
6466 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
6467 {
6468 /* TODO: A write to the CC flags possibly costs extra, this
6469 needs encoding in the cost tables. */
6470
e548c9df 6471 mode = GET_MODE (op0);
a8eecd00
JG
6472 /* ANDS. */
6473 if (GET_CODE (op0) == AND)
6474 {
6475 x = op0;
6476 goto cost_logic;
6477 }
6478
6479 if (GET_CODE (op0) == PLUS)
6480 {
6481 /* ADDS (and CMN alias). */
6482 x = op0;
6483 goto cost_plus;
6484 }
6485
6486 if (GET_CODE (op0) == MINUS)
6487 {
6488 /* SUBS. */
6489 x = op0;
6490 goto cost_minus;
6491 }
6492
345854d8
KT
6493 if (GET_CODE (op0) == ZERO_EXTRACT && op1 == const0_rtx
6494 && GET_MODE (x) == CC_NZmode && CONST_INT_P (XEXP (op0, 1))
6495 && CONST_INT_P (XEXP (op0, 2)))
6496 {
6497 /* COMPARE of ZERO_EXTRACT form of TST-immediate.
6498 Handle it here directly rather than going to cost_logic
6499 since we know the immediate generated for the TST is valid
6500 so we can avoid creating an intermediate rtx for it only
6501 for costing purposes. */
6502 if (speed)
6503 *cost += extra_cost->alu.logical;
6504
6505 *cost += rtx_cost (XEXP (op0, 0), GET_MODE (op0),
6506 ZERO_EXTRACT, 0, speed);
6507 return true;
6508 }
6509
a8eecd00
JG
6510 if (GET_CODE (op1) == NEG)
6511 {
6512 /* CMN. */
6513 if (speed)
6514 *cost += extra_cost->alu.arith;
6515
e548c9df
AM
6516 *cost += rtx_cost (op0, mode, COMPARE, 0, speed);
6517 *cost += rtx_cost (XEXP (op1, 0), mode, NEG, 1, speed);
a8eecd00
JG
6518 return true;
6519 }
6520
6521 /* CMP.
6522
6523 Compare can freely swap the order of operands, and
6524 canonicalization puts the more complex operation first.
6525 But the integer MINUS logic expects the shift/extend
6526 operation in op1. */
6527 if (! (REG_P (op0)
6528 || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
6529 {
6530 op0 = XEXP (x, 1);
6531 op1 = XEXP (x, 0);
6532 }
6533 goto cost_minus;
6534 }
6535
6536 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
6537 {
6538 /* FCMP. */
6539 if (speed)
6540 *cost += extra_cost->fp[mode == DFmode].compare;
6541
6542 if (CONST_DOUBLE_P (op1) && aarch64_float_const_zero_rtx_p (op1))
6543 {
e548c9df 6544 *cost += rtx_cost (op0, VOIDmode, COMPARE, 0, speed);
a8eecd00
JG
6545 /* FCMP supports constant 0.0 for no extra cost. */
6546 return true;
6547 }
6548 return false;
6549 }
6550
b6875aac
KV
6551 if (VECTOR_MODE_P (mode))
6552 {
6553 /* Vector compare. */
6554 if (speed)
6555 *cost += extra_cost->vect.alu;
6556
6557 if (aarch64_float_const_zero_rtx_p (op1))
6558 {
6559 /* Vector cm (eq|ge|gt|lt|le) supports constant 0.0 for no extra
6560 cost. */
6561 return true;
6562 }
6563 return false;
6564 }
a8eecd00 6565 return false;
43e9d192
IB
6566
6567 case MINUS:
4745e701
JG
6568 {
6569 op0 = XEXP (x, 0);
6570 op1 = XEXP (x, 1);
6571
6572cost_minus:
e548c9df 6573 *cost += rtx_cost (op0, mode, MINUS, 0, speed);
23cb6618 6574
4745e701
JG
6575 /* Detect valid immediates. */
6576 if ((GET_MODE_CLASS (mode) == MODE_INT
6577 || (GET_MODE_CLASS (mode) == MODE_CC
6578 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
6579 && CONST_INT_P (op1)
6580 && aarch64_uimm12_shift (INTVAL (op1)))
6581 {
4745e701
JG
6582 if (speed)
6583 /* SUB(S) (immediate). */
6584 *cost += extra_cost->alu.arith;
6585 return true;
4745e701
JG
6586 }
6587
7cc2145f
JG
6588 /* Look for SUB (extended register). */
6589 if (aarch64_rtx_arith_op_extract_p (op1, mode))
6590 {
6591 if (speed)
2533c820 6592 *cost += extra_cost->alu.extend_arith;
7cc2145f 6593
e47c4031
KT
6594 op1 = aarch64_strip_extend (op1);
6595 *cost += rtx_cost (op1, VOIDmode,
e548c9df 6596 (enum rtx_code) GET_CODE (op1), 0, speed);
7cc2145f
JG
6597 return true;
6598 }
6599
4745e701
JG
6600 rtx new_op1 = aarch64_strip_extend (op1);
6601
6602 /* Cost this as an FMA-alike operation. */
6603 if ((GET_CODE (new_op1) == MULT
0a78ebe4 6604 || aarch64_shift_p (GET_CODE (new_op1)))
4745e701
JG
6605 && code != COMPARE)
6606 {
6607 *cost += aarch64_rtx_mult_cost (new_op1, MULT,
6608 (enum rtx_code) code,
6609 speed);
4745e701
JG
6610 return true;
6611 }
43e9d192 6612
e548c9df 6613 *cost += rtx_cost (new_op1, VOIDmode, MINUS, 1, speed);
43e9d192 6614
4745e701
JG
6615 if (speed)
6616 {
b6875aac
KV
6617 if (VECTOR_MODE_P (mode))
6618 {
6619 /* Vector SUB. */
6620 *cost += extra_cost->vect.alu;
6621 }
6622 else if (GET_MODE_CLASS (mode) == MODE_INT)
6623 {
6624 /* SUB(S). */
6625 *cost += extra_cost->alu.arith;
6626 }
4745e701 6627 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
b6875aac
KV
6628 {
6629 /* FSUB. */
6630 *cost += extra_cost->fp[mode == DFmode].addsub;
6631 }
4745e701
JG
6632 }
6633 return true;
6634 }
43e9d192
IB
6635
6636 case PLUS:
4745e701
JG
6637 {
6638 rtx new_op0;
43e9d192 6639
4745e701
JG
6640 op0 = XEXP (x, 0);
6641 op1 = XEXP (x, 1);
43e9d192 6642
a8eecd00 6643cost_plus:
4745e701
JG
6644 if (GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMPARE
6645 || GET_RTX_CLASS (GET_CODE (op0)) == RTX_COMM_COMPARE)
6646 {
6647 /* CSINC. */
e548c9df
AM
6648 *cost += rtx_cost (XEXP (op0, 0), mode, PLUS, 0, speed);
6649 *cost += rtx_cost (op1, mode, PLUS, 1, speed);
4745e701
JG
6650 return true;
6651 }
43e9d192 6652
4745e701
JG
6653 if (GET_MODE_CLASS (mode) == MODE_INT
6654 && CONST_INT_P (op1)
6655 && aarch64_uimm12_shift (INTVAL (op1)))
6656 {
e548c9df 6657 *cost += rtx_cost (op0, mode, PLUS, 0, speed);
43e9d192 6658
4745e701
JG
6659 if (speed)
6660 /* ADD (immediate). */
6661 *cost += extra_cost->alu.arith;
6662 return true;
6663 }
6664
e548c9df 6665 *cost += rtx_cost (op1, mode, PLUS, 1, speed);
23cb6618 6666
7cc2145f
JG
6667 /* Look for ADD (extended register). */
6668 if (aarch64_rtx_arith_op_extract_p (op0, mode))
6669 {
6670 if (speed)
2533c820 6671 *cost += extra_cost->alu.extend_arith;
7cc2145f 6672
e47c4031
KT
6673 op0 = aarch64_strip_extend (op0);
6674 *cost += rtx_cost (op0, VOIDmode,
e548c9df 6675 (enum rtx_code) GET_CODE (op0), 0, speed);
7cc2145f
JG
6676 return true;
6677 }
6678
4745e701
JG
6679 /* Strip any extend, leave shifts behind as we will
6680 cost them through mult_cost. */
6681 new_op0 = aarch64_strip_extend (op0);
6682
6683 if (GET_CODE (new_op0) == MULT
0a78ebe4 6684 || aarch64_shift_p (GET_CODE (new_op0)))
4745e701
JG
6685 {
6686 *cost += aarch64_rtx_mult_cost (new_op0, MULT, PLUS,
6687 speed);
4745e701
JG
6688 return true;
6689 }
6690
e548c9df 6691 *cost += rtx_cost (new_op0, VOIDmode, PLUS, 0, speed);
4745e701
JG
6692
6693 if (speed)
6694 {
b6875aac
KV
6695 if (VECTOR_MODE_P (mode))
6696 {
6697 /* Vector ADD. */
6698 *cost += extra_cost->vect.alu;
6699 }
6700 else if (GET_MODE_CLASS (mode) == MODE_INT)
6701 {
6702 /* ADD. */
6703 *cost += extra_cost->alu.arith;
6704 }
4745e701 6705 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
b6875aac
KV
6706 {
6707 /* FADD. */
6708 *cost += extra_cost->fp[mode == DFmode].addsub;
6709 }
4745e701
JG
6710 }
6711 return true;
6712 }
43e9d192 6713
18b42b2a
KT
6714 case BSWAP:
6715 *cost = COSTS_N_INSNS (1);
6716
6717 if (speed)
b6875aac
KV
6718 {
6719 if (VECTOR_MODE_P (mode))
6720 *cost += extra_cost->vect.alu;
6721 else
6722 *cost += extra_cost->alu.rev;
6723 }
18b42b2a
KT
6724 return false;
6725
43e9d192 6726 case IOR:
f7d5cf8d
KT
6727 if (aarch_rev16_p (x))
6728 {
6729 *cost = COSTS_N_INSNS (1);
6730
b6875aac
KV
6731 if (speed)
6732 {
6733 if (VECTOR_MODE_P (mode))
6734 *cost += extra_cost->vect.alu;
6735 else
6736 *cost += extra_cost->alu.rev;
6737 }
6738 return true;
f7d5cf8d 6739 }
fb0cb7fa
KT
6740
6741 if (aarch64_extr_rtx_p (x, &op0, &op1))
6742 {
e548c9df
AM
6743 *cost += rtx_cost (op0, mode, IOR, 0, speed);
6744 *cost += rtx_cost (op1, mode, IOR, 1, speed);
fb0cb7fa
KT
6745 if (speed)
6746 *cost += extra_cost->alu.shift;
6747
6748 return true;
6749 }
f7d5cf8d 6750 /* Fall through. */
43e9d192
IB
6751 case XOR:
6752 case AND:
6753 cost_logic:
6754 op0 = XEXP (x, 0);
6755 op1 = XEXP (x, 1);
6756
b6875aac
KV
6757 if (VECTOR_MODE_P (mode))
6758 {
6759 if (speed)
6760 *cost += extra_cost->vect.alu;
6761 return true;
6762 }
6763
268c3b47
JG
6764 if (code == AND
6765 && GET_CODE (op0) == MULT
6766 && CONST_INT_P (XEXP (op0, 1))
6767 && CONST_INT_P (op1)
6768 && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (op0, 1))),
6769 INTVAL (op1)) != 0)
6770 {
6771 /* This is a UBFM/SBFM. */
e548c9df 6772 *cost += rtx_cost (XEXP (op0, 0), mode, ZERO_EXTRACT, 0, speed);
268c3b47
JG
6773 if (speed)
6774 *cost += extra_cost->alu.bfx;
6775 return true;
6776 }
6777
e548c9df 6778 if (GET_MODE_CLASS (mode) == MODE_INT)
43e9d192 6779 {
8c83f71d 6780 if (CONST_INT_P (op1))
43e9d192 6781 {
8c83f71d
KT
6782 /* We have a mask + shift version of a UBFIZ
6783 i.e. the *andim_ashift<mode>_bfiz pattern. */
6784 if (GET_CODE (op0) == ASHIFT
6785 && aarch64_mask_and_shift_for_ubfiz_p (mode, op1,
6786 XEXP (op0, 1)))
6787 {
6788 *cost += rtx_cost (XEXP (op0, 0), mode,
6789 (enum rtx_code) code, 0, speed);
6790 if (speed)
6791 *cost += extra_cost->alu.bfx;
268c3b47 6792
8c83f71d
KT
6793 return true;
6794 }
6795 else if (aarch64_bitmask_imm (INTVAL (op1), mode))
6796 {
6797 /* We possibly get the immediate for free, this is not
6798 modelled. */
6799 *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
6800 if (speed)
6801 *cost += extra_cost->alu.logical;
268c3b47 6802
8c83f71d
KT
6803 return true;
6804 }
43e9d192
IB
6805 }
6806 else
6807 {
268c3b47
JG
6808 rtx new_op0 = op0;
6809
6810 /* Handle ORN, EON, or BIC. */
43e9d192
IB
6811 if (GET_CODE (op0) == NOT)
6812 op0 = XEXP (op0, 0);
268c3b47
JG
6813
6814 new_op0 = aarch64_strip_shift (op0);
6815
6816 /* If we had a shift on op0 then this is a logical-shift-
6817 by-register/immediate operation. Otherwise, this is just
6818 a logical operation. */
6819 if (speed)
6820 {
6821 if (new_op0 != op0)
6822 {
6823 /* Shift by immediate. */
6824 if (CONST_INT_P (XEXP (op0, 1)))
6825 *cost += extra_cost->alu.log_shift;
6826 else
6827 *cost += extra_cost->alu.log_shift_reg;
6828 }
6829 else
6830 *cost += extra_cost->alu.logical;
6831 }
6832
6833 /* In both cases we want to cost both operands. */
e548c9df
AM
6834 *cost += rtx_cost (new_op0, mode, (enum rtx_code) code, 0, speed);
6835 *cost += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed);
268c3b47
JG
6836
6837 return true;
43e9d192 6838 }
43e9d192
IB
6839 }
6840 return false;
6841
268c3b47 6842 case NOT:
6365da9e
KT
6843 x = XEXP (x, 0);
6844 op0 = aarch64_strip_shift (x);
6845
b6875aac
KV
6846 if (VECTOR_MODE_P (mode))
6847 {
6848 /* Vector NOT. */
6849 *cost += extra_cost->vect.alu;
6850 return false;
6851 }
6852
6365da9e
KT
6853 /* MVN-shifted-reg. */
6854 if (op0 != x)
6855 {
e548c9df 6856 *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
6365da9e
KT
6857
6858 if (speed)
6859 *cost += extra_cost->alu.log_shift;
6860
6861 return true;
6862 }
6863 /* EON can have two forms: (xor (not a) b) but also (not (xor a b)).
6864 Handle the second form here taking care that 'a' in the above can
6865 be a shift. */
6866 else if (GET_CODE (op0) == XOR)
6867 {
6868 rtx newop0 = XEXP (op0, 0);
6869 rtx newop1 = XEXP (op0, 1);
6870 rtx op0_stripped = aarch64_strip_shift (newop0);
6871
e548c9df
AM
6872 *cost += rtx_cost (newop1, mode, (enum rtx_code) code, 1, speed);
6873 *cost += rtx_cost (op0_stripped, mode, XOR, 0, speed);
6365da9e
KT
6874
6875 if (speed)
6876 {
6877 if (op0_stripped != newop0)
6878 *cost += extra_cost->alu.log_shift;
6879 else
6880 *cost += extra_cost->alu.logical;
6881 }
6882
6883 return true;
6884 }
268c3b47
JG
6885 /* MVN. */
6886 if (speed)
6887 *cost += extra_cost->alu.logical;
6888
268c3b47
JG
6889 return false;
6890
43e9d192 6891 case ZERO_EXTEND:
b1685e62
JG
6892
6893 op0 = XEXP (x, 0);
6894 /* If a value is written in SI mode, then zero extended to DI
6895 mode, the operation will in general be free as a write to
6896 a 'w' register implicitly zeroes the upper bits of an 'x'
6897 register. However, if this is
6898
6899 (set (reg) (zero_extend (reg)))
6900
6901 we must cost the explicit register move. */
6902 if (mode == DImode
6903 && GET_MODE (op0) == SImode
6904 && outer == SET)
6905 {
e548c9df 6906 int op_cost = rtx_cost (op0, VOIDmode, ZERO_EXTEND, 0, speed);
b1685e62 6907
dde23f43
KM
6908 /* If OP_COST is non-zero, then the cost of the zero extend
6909 is effectively the cost of the inner operation. Otherwise
6910 we have a MOV instruction and we take the cost from the MOV
6911 itself. This is true independently of whether we are
6912 optimizing for space or time. */
6913 if (op_cost)
b1685e62
JG
6914 *cost = op_cost;
6915
6916 return true;
6917 }
e548c9df 6918 else if (MEM_P (op0))
43e9d192 6919 {
b1685e62 6920 /* All loads can zero extend to any size for free. */
e548c9df 6921 *cost = rtx_cost (op0, VOIDmode, ZERO_EXTEND, param, speed);
43e9d192
IB
6922 return true;
6923 }
b1685e62 6924
283b6c85
KT
6925 op0 = aarch64_extend_bitfield_pattern_p (x);
6926 if (op0)
6927 {
6928 *cost += rtx_cost (op0, mode, ZERO_EXTEND, 0, speed);
6929 if (speed)
6930 *cost += extra_cost->alu.bfx;
6931 return true;
6932 }
6933
b1685e62 6934 if (speed)
b6875aac
KV
6935 {
6936 if (VECTOR_MODE_P (mode))
6937 {
6938 /* UMOV. */
6939 *cost += extra_cost->vect.alu;
6940 }
6941 else
6942 {
63715e5e
WD
6943 /* We generate an AND instead of UXTB/UXTH. */
6944 *cost += extra_cost->alu.logical;
b6875aac
KV
6945 }
6946 }
43e9d192
IB
6947 return false;
6948
6949 case SIGN_EXTEND:
b1685e62 6950 if (MEM_P (XEXP (x, 0)))
43e9d192 6951 {
b1685e62
JG
6952 /* LDRSH. */
6953 if (speed)
6954 {
6955 rtx address = XEXP (XEXP (x, 0), 0);
6956 *cost += extra_cost->ldst.load_sign_extend;
6957
6958 *cost +=
6959 COSTS_N_INSNS (aarch64_address_cost (address, mode,
6960 0, speed));
6961 }
43e9d192
IB
6962 return true;
6963 }
b1685e62 6964
283b6c85
KT
6965 op0 = aarch64_extend_bitfield_pattern_p (x);
6966 if (op0)
6967 {
6968 *cost += rtx_cost (op0, mode, SIGN_EXTEND, 0, speed);
6969 if (speed)
6970 *cost += extra_cost->alu.bfx;
6971 return true;
6972 }
6973
b1685e62 6974 if (speed)
b6875aac
KV
6975 {
6976 if (VECTOR_MODE_P (mode))
6977 *cost += extra_cost->vect.alu;
6978 else
6979 *cost += extra_cost->alu.extend;
6980 }
43e9d192
IB
6981 return false;
6982
ba0cfa17
JG
6983 case ASHIFT:
6984 op0 = XEXP (x, 0);
6985 op1 = XEXP (x, 1);
6986
6987 if (CONST_INT_P (op1))
6988 {
ba0cfa17 6989 if (speed)
b6875aac
KV
6990 {
6991 if (VECTOR_MODE_P (mode))
6992 {
6993 /* Vector shift (immediate). */
6994 *cost += extra_cost->vect.alu;
6995 }
6996 else
6997 {
 6998 /* LSL (immediate), UBFM, UBFIZ and friends. These are all
 6999 aliases. */
7000 *cost += extra_cost->alu.shift;
7001 }
7002 }
ba0cfa17
JG
7003
7004 /* We can incorporate zero/sign extend for free. */
7005 if (GET_CODE (op0) == ZERO_EXTEND
7006 || GET_CODE (op0) == SIGN_EXTEND)
7007 op0 = XEXP (op0, 0);
7008
e548c9df 7009 *cost += rtx_cost (op0, VOIDmode, ASHIFT, 0, speed);
ba0cfa17
JG
7010 return true;
7011 }
7012 else
7013 {
ba0cfa17 7014 if (speed)
b6875aac
KV
7015 {
7016 if (VECTOR_MODE_P (mode))
7017 {
7018 /* Vector shift (register). */
7019 *cost += extra_cost->vect.alu;
7020 }
7021 else
7022 {
7023 /* LSLV. */
7024 *cost += extra_cost->alu.shift_reg;
7025 }
7026 }
ba0cfa17
JG
7027 return false; /* All arguments need to be in registers. */
7028 }
7029
43e9d192 7030 case ROTATE:
43e9d192
IB
7031 case ROTATERT:
7032 case LSHIFTRT:
43e9d192 7033 case ASHIFTRT:
ba0cfa17
JG
7034 op0 = XEXP (x, 0);
7035 op1 = XEXP (x, 1);
43e9d192 7036
ba0cfa17
JG
7037 if (CONST_INT_P (op1))
7038 {
7039 /* ASR (immediate) and friends. */
7040 if (speed)
b6875aac
KV
7041 {
7042 if (VECTOR_MODE_P (mode))
7043 *cost += extra_cost->vect.alu;
7044 else
7045 *cost += extra_cost->alu.shift;
7046 }
43e9d192 7047
e548c9df 7048 *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
ba0cfa17
JG
7049 return true;
7050 }
7051 else
7052 {
7053
7054 /* ASR (register) and friends. */
7055 if (speed)
b6875aac
KV
7056 {
7057 if (VECTOR_MODE_P (mode))
7058 *cost += extra_cost->vect.alu;
7059 else
7060 *cost += extra_cost->alu.shift_reg;
7061 }
ba0cfa17
JG
7062 return false; /* All arguments need to be in registers. */
7063 }
43e9d192 7064
909734be
JG
7065 case SYMBOL_REF:
7066
1b1e81f8
JW
7067 if (aarch64_cmodel == AARCH64_CMODEL_LARGE
7068 || aarch64_cmodel == AARCH64_CMODEL_SMALL_SPIC)
909734be
JG
7069 {
7070 /* LDR. */
7071 if (speed)
7072 *cost += extra_cost->ldst.load;
7073 }
7074 else if (aarch64_cmodel == AARCH64_CMODEL_SMALL
7075 || aarch64_cmodel == AARCH64_CMODEL_SMALL_PIC)
7076 {
7077 /* ADRP, followed by ADD. */
7078 *cost += COSTS_N_INSNS (1);
7079 if (speed)
7080 *cost += 2 * extra_cost->alu.arith;
7081 }
7082 else if (aarch64_cmodel == AARCH64_CMODEL_TINY
7083 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
7084 {
7085 /* ADR. */
7086 if (speed)
7087 *cost += extra_cost->alu.arith;
7088 }
7089
7090 if (flag_pic)
7091 {
7092 /* One extra load instruction, after accessing the GOT. */
7093 *cost += COSTS_N_INSNS (1);
7094 if (speed)
7095 *cost += extra_cost->ldst.load;
7096 }
43e9d192
IB
7097 return true;
7098
909734be 7099 case HIGH:
43e9d192 7100 case LO_SUM:
909734be
JG
7101 /* ADRP/ADD (immediate). */
7102 if (speed)
7103 *cost += extra_cost->alu.arith;
43e9d192
IB
7104 return true;
7105
7106 case ZERO_EXTRACT:
7107 case SIGN_EXTRACT:
7cc2145f
JG
7108 /* UBFX/SBFX. */
7109 if (speed)
b6875aac
KV
7110 {
7111 if (VECTOR_MODE_P (mode))
7112 *cost += extra_cost->vect.alu;
7113 else
7114 *cost += extra_cost->alu.bfx;
7115 }
7cc2145f
JG
7116
7117 /* We can trust that the immediates used will be correct (there
7118 are no by-register forms), so we need only cost op0. */
e548c9df 7119 *cost += rtx_cost (XEXP (x, 0), VOIDmode, (enum rtx_code) code, 0, speed);
43e9d192
IB
7120 return true;
7121
7122 case MULT:
4745e701
JG
7123 *cost += aarch64_rtx_mult_cost (x, MULT, 0, speed);
7124 /* aarch64_rtx_mult_cost always handles recursion to its
7125 operands. */
7126 return true;
43e9d192
IB
7127
7128 case MOD:
4f58fe36
KT
7129 /* We can expand signed mod by power of 2 using a NEGS, two parallel
7130 ANDs and a CSNEG. Assume here that CSNEG is the same as the cost of
7131 an unconditional negate. This case should only ever be reached through
7132 the set_smod_pow2_cheap check in expmed.c. */
7133 if (CONST_INT_P (XEXP (x, 1))
7134 && exact_log2 (INTVAL (XEXP (x, 1))) > 0
7135 && (mode == SImode || mode == DImode))
7136 {
7137 /* We expand to 4 instructions. Reset the baseline. */
7138 *cost = COSTS_N_INSNS (4);
7139
7140 if (speed)
7141 *cost += 2 * extra_cost->alu.logical
7142 + 2 * extra_cost->alu.arith;
7143
7144 return true;
7145 }
7146
7147 /* Fall-through. */
43e9d192 7148 case UMOD:
43e9d192
IB
7149 if (speed)
7150 {
b6875aac
KV
7151 if (VECTOR_MODE_P (mode))
7152 *cost += extra_cost->vect.alu;
e548c9df
AM
7153 else if (GET_MODE_CLASS (mode) == MODE_INT)
7154 *cost += (extra_cost->mult[mode == DImode].add
7155 + extra_cost->mult[mode == DImode].idiv);
7156 else if (mode == DFmode)
73250c4c
KT
7157 *cost += (extra_cost->fp[1].mult
7158 + extra_cost->fp[1].div);
e548c9df 7159 else if (mode == SFmode)
73250c4c
KT
7160 *cost += (extra_cost->fp[0].mult
7161 + extra_cost->fp[0].div);
43e9d192
IB
7162 }
7163 return false; /* All arguments need to be in registers. */
7164
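/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): a C model of the value computed by the branchless
   NEGS/AND/AND/CSNEG expansion costed in the MOD case above. */

int
example_smod8 (int x)
{
  int m = x & 7;                          /* low bits of the magnitude */
  return (x < 0 && m != 0) ? m - 8 : m;   /* equals x % 8 for all x */
}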
7165 case DIV:
7166 case UDIV:
4105fe38 7167 case SQRT:
43e9d192
IB
7168 if (speed)
7169 {
b6875aac
KV
7170 if (VECTOR_MODE_P (mode))
7171 *cost += extra_cost->vect.alu;
7172 else if (GET_MODE_CLASS (mode) == MODE_INT)
4105fe38
JG
7173 /* There is no integer SQRT, so only DIV and UDIV can get
7174 here. */
7175 *cost += extra_cost->mult[mode == DImode].idiv;
7176 else
7177 *cost += extra_cost->fp[mode == DFmode].div;
43e9d192
IB
7178 }
7179 return false; /* All arguments need to be in registers. */
7180
a8eecd00 7181 case IF_THEN_ELSE:
2d5ffe46
AP
7182 return aarch64_if_then_else_costs (XEXP (x, 0), XEXP (x, 1),
7183 XEXP (x, 2), cost, speed);
a8eecd00
JG
7184
7185 case EQ:
7186 case NE:
7187 case GT:
7188 case GTU:
7189 case LT:
7190 case LTU:
7191 case GE:
7192 case GEU:
7193 case LE:
7194 case LEU:
7195
7196 return false; /* All arguments must be in registers. */
7197
b292109f
JG
7198 case FMA:
7199 op0 = XEXP (x, 0);
7200 op1 = XEXP (x, 1);
7201 op2 = XEXP (x, 2);
7202
7203 if (speed)
b6875aac
KV
7204 {
7205 if (VECTOR_MODE_P (mode))
7206 *cost += extra_cost->vect.alu;
7207 else
7208 *cost += extra_cost->fp[mode == DFmode].fma;
7209 }
b292109f
JG
7210
7211 /* FMSUB, FNMADD, and FNMSUB are free. */
7212 if (GET_CODE (op0) == NEG)
7213 op0 = XEXP (op0, 0);
7214
7215 if (GET_CODE (op2) == NEG)
7216 op2 = XEXP (op2, 0);
7217
7218 /* aarch64_fnma4_elt_to_64v2df has the NEG as operand 1,
7219 and the by-element operand as operand 0. */
7220 if (GET_CODE (op1) == NEG)
7221 op1 = XEXP (op1, 0);
7222
7223 /* Catch vector-by-element operations. The by-element operand can
7224 either be (vec_duplicate (vec_select (x))) or just
7225 (vec_select (x)), depending on whether we are multiplying by
7226 a vector or a scalar.
7227
7228 Canonicalization is not very good in these cases: FMA4 will put the
 7229 by-element operand as operand 0 and FNMA4 will have it as operand 1. */
7230 if (GET_CODE (op0) == VEC_DUPLICATE)
7231 op0 = XEXP (op0, 0);
7232 else if (GET_CODE (op1) == VEC_DUPLICATE)
7233 op1 = XEXP (op1, 0);
7234
7235 if (GET_CODE (op0) == VEC_SELECT)
7236 op0 = XEXP (op0, 0);
7237 else if (GET_CODE (op1) == VEC_SELECT)
7238 op1 = XEXP (op1, 0);
7239
7240 /* If the remaining parameters are not registers,
7241 get the cost to put them into registers. */
e548c9df
AM
7242 *cost += rtx_cost (op0, mode, FMA, 0, speed);
7243 *cost += rtx_cost (op1, mode, FMA, 1, speed);
7244 *cost += rtx_cost (op2, mode, FMA, 2, speed);
b292109f
JG
7245 return true;
7246
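/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   names are hypothetical): the NEGs stripped above are free because a
   negated FMA operand selects one of the fused forms (FMADD/FMSUB/
   FNMADD/FNMSUB, or FMLS for vectors) instead of needing a separate FNEG. */

double
example_fma_neg_addend (double a, double b, double c)
{
  return __builtin_fma (a, b, -c);   /* typically one fused instruction */
}

double
example_fma_neg_mult (double a, double b, double c)
{
  return __builtin_fma (-a, b, c);   /* likewise, no separate negate */
}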
5e2a765b
KT
7247 case FLOAT:
7248 case UNSIGNED_FLOAT:
7249 if (speed)
7250 *cost += extra_cost->fp[mode == DFmode].fromint;
7251 return false;
7252
b292109f
JG
7253 case FLOAT_EXTEND:
7254 if (speed)
b6875aac
KV
7255 {
7256 if (VECTOR_MODE_P (mode))
7257 {
7258 /* Vector widening conversion. */
7259 *cost += extra_cost->vect.alu;
7260 }
7261 else
7262 *cost += extra_cost->fp[mode == DFmode].widen;
7263 }
b292109f
JG
7264 return false;
7265
7266 case FLOAT_TRUNCATE:
7267 if (speed)
b6875aac
KV
7268 {
7269 if (VECTOR_MODE_P (mode))
7270 {
7271 /* Vector narrowing conversion. */
7272 *cost += extra_cost->vect.alu;
7273 }
7274 else
7275 *cost += extra_cost->fp[mode == DFmode].narrow;
7276 }
b292109f
JG
7277 return false;
7278
61263118
KT
7279 case FIX:
7280 case UNSIGNED_FIX:
7281 x = XEXP (x, 0);
7282 /* Strip the rounding part. They will all be implemented
7283 by the fcvt* family of instructions anyway. */
7284 if (GET_CODE (x) == UNSPEC)
7285 {
7286 unsigned int uns_code = XINT (x, 1);
7287
7288 if (uns_code == UNSPEC_FRINTA
7289 || uns_code == UNSPEC_FRINTM
7290 || uns_code == UNSPEC_FRINTN
7291 || uns_code == UNSPEC_FRINTP
7292 || uns_code == UNSPEC_FRINTZ)
7293 x = XVECEXP (x, 0, 0);
7294 }
7295
7296 if (speed)
b6875aac
KV
7297 {
7298 if (VECTOR_MODE_P (mode))
7299 *cost += extra_cost->vect.alu;
7300 else
7301 *cost += extra_cost->fp[GET_MODE (x) == DFmode].toint;
7302 }
39252973
KT
7303
7304 /* We can combine fmul by a power of 2 followed by a fcvt into a single
7305 fixed-point fcvt. */
7306 if (GET_CODE (x) == MULT
7307 && ((VECTOR_MODE_P (mode)
7308 && aarch64_vec_fpconst_pow_of_2 (XEXP (x, 1)) > 0)
7309 || aarch64_fpconst_pow_of_2 (XEXP (x, 1)) > 0))
7310 {
7311 *cost += rtx_cost (XEXP (x, 0), VOIDmode, (rtx_code) code,
7312 0, speed);
7313 return true;
7314 }
7315
e548c9df 7316 *cost += rtx_cost (x, VOIDmode, (enum rtx_code) code, 0, speed);
61263118
KT
7317 return true;
7318
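/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): the MULT-by-power-of-2 case above corresponds to a
   conversion that applies the scale inside the convert itself, via the
   fixed-point form of FCVTZS/FCVTZU. */

int
example_to_q16 (float x)
{
  /* Typically a single "fcvtzs w0, s0, #16" rather than fmul + fcvtzs. */
  return (int) (x * 65536.0f);
}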
b292109f 7319 case ABS:
b6875aac
KV
7320 if (VECTOR_MODE_P (mode))
7321 {
7322 /* ABS (vector). */
7323 if (speed)
7324 *cost += extra_cost->vect.alu;
7325 }
7326 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
b292109f 7327 {
19261b99
KT
7328 op0 = XEXP (x, 0);
7329
7330 /* FABD, which is analogous to FADD. */
7331 if (GET_CODE (op0) == MINUS)
7332 {
e548c9df
AM
7333 *cost += rtx_cost (XEXP (op0, 0), mode, MINUS, 0, speed);
7334 *cost += rtx_cost (XEXP (op0, 1), mode, MINUS, 1, speed);
19261b99
KT
7335 if (speed)
7336 *cost += extra_cost->fp[mode == DFmode].addsub;
7337
7338 return true;
7339 }
7340 /* Simple FABS is analogous to FNEG. */
b292109f
JG
7341 if (speed)
7342 *cost += extra_cost->fp[mode == DFmode].neg;
7343 }
7344 else
7345 {
7346 /* Integer ABS will either be split into
7347 two arithmetic instructions, or will be an ABS
7348 (scalar), which we don't model. */
7349 *cost = COSTS_N_INSNS (2);
7350 if (speed)
7351 *cost += 2 * extra_cost->alu.arith;
7352 }
7353 return false;
7354
7355 case SMAX:
7356 case SMIN:
7357 if (speed)
7358 {
b6875aac
KV
7359 if (VECTOR_MODE_P (mode))
7360 *cost += extra_cost->vect.alu;
7361 else
7362 {
7363 /* FMAXNM/FMINNM/FMAX/FMIN.
7364 TODO: This may not be accurate for all implementations, but
7365 we do not model this in the cost tables. */
7366 *cost += extra_cost->fp[mode == DFmode].addsub;
7367 }
b292109f
JG
7368 }
7369 return false;
7370
61263118
KT
7371 case UNSPEC:
7372 /* The floating point round to integer frint* instructions. */
7373 if (aarch64_frint_unspec_p (XINT (x, 1)))
7374 {
7375 if (speed)
7376 *cost += extra_cost->fp[mode == DFmode].roundint;
7377
7378 return false;
7379 }
781aeb73
KT
7380
7381 if (XINT (x, 1) == UNSPEC_RBIT)
7382 {
7383 if (speed)
7384 *cost += extra_cost->alu.rev;
7385
7386 return false;
7387 }
61263118
KT
7388 break;
7389
fb620c4a
JG
7390 case TRUNCATE:
7391
7392 /* Decompose <su>muldi3_highpart. */
7393 if (/* (truncate:DI */
7394 mode == DImode
7395 /* (lshiftrt:TI */
7396 && GET_MODE (XEXP (x, 0)) == TImode
7397 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
7398 /* (mult:TI */
7399 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7400 /* (ANY_EXTEND:TI (reg:DI))
7401 (ANY_EXTEND:TI (reg:DI))) */
7402 && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
7403 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == ZERO_EXTEND)
7404 || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
7405 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND))
7406 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0)) == DImode
7407 && GET_MODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0)) == DImode
7408 /* (const_int 64) */
7409 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7410 && UINTVAL (XEXP (XEXP (x, 0), 1)) == 64)
7411 {
7412 /* UMULH/SMULH. */
7413 if (speed)
7414 *cost += extra_cost->mult[mode == DImode].extend;
e548c9df
AM
7415 *cost += rtx_cost (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
7416 mode, MULT, 0, speed);
7417 *cost += rtx_cost (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0),
7418 mode, MULT, 1, speed);
fb620c4a
JG
7419 return true;
7420 }
7421
7422 /* Fall through. */
43e9d192 7423 default:
61263118 7424 break;
43e9d192 7425 }
61263118
KT
7426
7427 if (dump_file && (dump_flags & TDF_DETAILS))
7428 fprintf (dump_file,
7429 "\nFailed to cost RTX. Assuming default cost.\n");
7430
7431 return true;
43e9d192
IB
7432}
7433
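/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): the TRUNCATE case above matches the RTL generated when
   only the high half of a 64x64->128 multiply is kept, i.e. the C pattern
   below, which maps onto a single SMULH (or UMULH for unsigned types). */

long long
example_mulh (long long a, long long b)
{
  return (long long) (((__int128) a * b) >> 64);
}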
0ee859b5
JG
7434/* Wrapper around aarch64_rtx_costs, dumps the partial, or total cost
7435 calculated for X. This cost is stored in *COST. Returns true
7436 if the total cost of X was calculated. */
7437static bool
e548c9df 7438aarch64_rtx_costs_wrapper (rtx x, machine_mode mode, int outer,
0ee859b5
JG
7439 int param, int *cost, bool speed)
7440{
e548c9df 7441 bool result = aarch64_rtx_costs (x, mode, outer, param, cost, speed);
0ee859b5
JG
7442
7443 if (dump_file && (dump_flags & TDF_DETAILS))
7444 {
7445 print_rtl_single (dump_file, x);
7446 fprintf (dump_file, "\n%s cost: %d (%s)\n",
7447 speed ? "Hot" : "Cold",
7448 *cost, result ? "final" : "partial");
7449 }
7450
7451 return result;
7452}
7453
43e9d192 7454static int
ef4bddc2 7455aarch64_register_move_cost (machine_mode mode,
8a3a7e67 7456 reg_class_t from_i, reg_class_t to_i)
43e9d192 7457{
8a3a7e67
RH
7458 enum reg_class from = (enum reg_class) from_i;
7459 enum reg_class to = (enum reg_class) to_i;
43e9d192 7460 const struct cpu_regmove_cost *regmove_cost
b175b679 7461 = aarch64_tune_params.regmove_cost;
43e9d192 7462
3be07662 7463 /* Caller save and pointer regs are equivalent to GENERAL_REGS. */
2876a13f 7464 if (to == CALLER_SAVE_REGS || to == POINTER_REGS)
3be07662
WD
7465 to = GENERAL_REGS;
7466
2876a13f 7467 if (from == CALLER_SAVE_REGS || from == POINTER_REGS)
3be07662
WD
7468 from = GENERAL_REGS;
7469
6ee70f81
AP
7470 /* Moving between GPR and stack cost is the same as GP2GP. */
7471 if ((from == GENERAL_REGS && to == STACK_REG)
7472 || (to == GENERAL_REGS && from == STACK_REG))
7473 return regmove_cost->GP2GP;
7474
7475 /* To/From the stack register, we move via the gprs. */
7476 if (to == STACK_REG || from == STACK_REG)
7477 return aarch64_register_move_cost (mode, from, GENERAL_REGS)
7478 + aarch64_register_move_cost (mode, GENERAL_REGS, to);
7479
8919453c
WD
7480 if (GET_MODE_SIZE (mode) == 16)
7481 {
7482 /* 128-bit operations on general registers require 2 instructions. */
7483 if (from == GENERAL_REGS && to == GENERAL_REGS)
7484 return regmove_cost->GP2GP * 2;
7485 else if (from == GENERAL_REGS)
7486 return regmove_cost->GP2FP * 2;
7487 else if (to == GENERAL_REGS)
7488 return regmove_cost->FP2GP * 2;
7489
7490 /* When AdvSIMD instructions are disabled it is not possible to move
7491 a 128-bit value directly between Q registers. This is handled in
7492 secondary reload. A general register is used as a scratch to move
7493 the upper DI value and the lower DI value is moved directly,
7494 hence the cost is the sum of three moves. */
7495 if (! TARGET_SIMD)
7496 return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
7497
7498 return regmove_cost->FP2FP;
7499 }
7500
43e9d192
IB
7501 if (from == GENERAL_REGS && to == GENERAL_REGS)
7502 return regmove_cost->GP2GP;
7503 else if (from == GENERAL_REGS)
7504 return regmove_cost->GP2FP;
7505 else if (to == GENERAL_REGS)
7506 return regmove_cost->FP2GP;
7507
43e9d192
IB
7508 return regmove_cost->FP2FP;
7509}
7510
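/* Worked example (editor's addition, not from aarch64.c), using
   hypothetical per-CPU numbers GP2GP=1, GP2FP=2, FP2GP=2, FP2FP=2:
   an FP_REGS -> STACK_REG move is costed through the recursion above as
   FP2GP (2) plus GENERAL_REGS -> STACK_REG (treated as GP2GP, 1) = 3, and
   any 16-byte move between general registers is doubled to 2 * GP2GP = 2. */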
7511static int
ef4bddc2 7512aarch64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
43e9d192
IB
7513 reg_class_t rclass ATTRIBUTE_UNUSED,
7514 bool in ATTRIBUTE_UNUSED)
7515{
b175b679 7516 return aarch64_tune_params.memmov_cost;
43e9d192
IB
7517}
7518
0c30e0f3
EM
7519/* Return true if it is safe and beneficial to use the approximate rsqrt optabs
7520 to optimize 1.0/sqrt. */
ee62a5a6
RS
7521
7522static bool
9acc9cbe 7523use_rsqrt_p (machine_mode mode)
ee62a5a6
RS
7524{
7525 return (!flag_trapping_math
7526 && flag_unsafe_math_optimizations
9acc9cbe
EM
7527 && ((aarch64_tune_params.approx_modes->recip_sqrt
7528 & AARCH64_APPROX_MODE (mode))
1a33079e 7529 || flag_mrecip_low_precision_sqrt));
ee62a5a6
RS
7530}
7531
0c30e0f3
EM
7532/* Function to decide when to use the approximate reciprocal square root
7533 builtin. */
a6fc00da
BH
7534
7535static tree
ee62a5a6 7536aarch64_builtin_reciprocal (tree fndecl)
a6fc00da 7537{
9acc9cbe
EM
7538 machine_mode mode = TYPE_MODE (TREE_TYPE (fndecl));
7539
7540 if (!use_rsqrt_p (mode))
a6fc00da 7541 return NULL_TREE;
ee62a5a6 7542 return aarch64_builtin_rsqrt (DECL_FUNCTION_CODE (fndecl));
a6fc00da
BH
7543}
7544
7545typedef rtx (*rsqrte_type) (rtx, rtx);
7546
98daafa0
EM
7547/* Select reciprocal square root initial estimate insn depending on machine
7548 mode. */
a6fc00da 7549
98daafa0 7550static rsqrte_type
a6fc00da
BH
7551get_rsqrte_type (machine_mode mode)
7552{
7553 switch (mode)
7554 {
2a823433
JW
7555 case DFmode: return gen_aarch64_rsqrtedf;
7556 case SFmode: return gen_aarch64_rsqrtesf;
7557 case V2DFmode: return gen_aarch64_rsqrtev2df;
7558 case V2SFmode: return gen_aarch64_rsqrtev2sf;
7559 case V4SFmode: return gen_aarch64_rsqrtev4sf;
a6fc00da
BH
7560 default: gcc_unreachable ();
7561 }
7562}
7563
7564typedef rtx (*rsqrts_type) (rtx, rtx, rtx);
7565
98daafa0 7566/* Select reciprocal square root series step insn depending on machine mode. */
a6fc00da 7567
98daafa0 7568static rsqrts_type
a6fc00da
BH
7569get_rsqrts_type (machine_mode mode)
7570{
7571 switch (mode)
7572 {
00ea75d4
JW
7573 case DFmode: return gen_aarch64_rsqrtsdf;
7574 case SFmode: return gen_aarch64_rsqrtssf;
7575 case V2DFmode: return gen_aarch64_rsqrtsv2df;
7576 case V2SFmode: return gen_aarch64_rsqrtsv2sf;
7577 case V4SFmode: return gen_aarch64_rsqrtsv4sf;
a6fc00da
BH
7578 default: gcc_unreachable ();
7579 }
7580}
7581
98daafa0
EM
7582/* Emit instruction sequence to compute either the approximate square root
7583 or its approximate reciprocal, depending on the flag RECP, and return
7584 whether the sequence was emitted or not. */
a6fc00da 7585
98daafa0
EM
7586bool
7587aarch64_emit_approx_sqrt (rtx dst, rtx src, bool recp)
a6fc00da 7588{
98daafa0 7589 machine_mode mode = GET_MODE (dst);
daef0a8c
JW
7590
7591 if (GET_MODE_INNER (mode) == HFmode)
7592 return false;
7593
98daafa0
EM
7594 machine_mode mmsk = mode_for_vector
7595 (int_mode_for_mode (GET_MODE_INNER (mode)),
7596 GET_MODE_NUNITS (mode));
7597 bool use_approx_sqrt_p = (!recp
7598 && (flag_mlow_precision_sqrt
7599 || (aarch64_tune_params.approx_modes->sqrt
7600 & AARCH64_APPROX_MODE (mode))));
7601 bool use_approx_rsqrt_p = (recp
7602 && (flag_mrecip_low_precision_sqrt
7603 || (aarch64_tune_params.approx_modes->recip_sqrt
7604 & AARCH64_APPROX_MODE (mode))));
7605
7606 if (!flag_finite_math_only
7607 || flag_trapping_math
7608 || !flag_unsafe_math_optimizations
7609 || !(use_approx_sqrt_p || use_approx_rsqrt_p)
7610 || optimize_function_for_size_p (cfun))
7611 return false;
a6fc00da 7612
98daafa0
EM
7613 rtx xmsk = gen_reg_rtx (mmsk);
7614 if (!recp)
7615 /* When calculating the approximate square root, compare the argument with
7616 0.0 and create a mask. */
7617 emit_insn (gen_rtx_SET (xmsk, gen_rtx_NEG (mmsk, gen_rtx_EQ (mmsk, src,
7618 CONST0_RTX (mode)))));
a6fc00da 7619
98daafa0
EM
7620 /* Estimate the approximate reciprocal square root. */
7621 rtx xdst = gen_reg_rtx (mode);
7622 emit_insn ((*get_rsqrte_type (mode)) (xdst, src));
a6fc00da 7623
98daafa0
EM
7624 /* Iterate over the series twice for SF and thrice for DF. */
7625 int iterations = (GET_MODE_INNER (mode) == DFmode) ? 3 : 2;
a6fc00da 7626
98daafa0
EM
7627 /* Optionally iterate over the series once less for faster performance
7628 while sacrificing the accuracy. */
7629 if ((recp && flag_mrecip_low_precision_sqrt)
7630 || (!recp && flag_mlow_precision_sqrt))
a6fc00da
BH
7631 iterations--;
7632
98daafa0
EM
7633 /* Iterate over the series to calculate the approximate reciprocal square
7634 root. */
7635 rtx x1 = gen_reg_rtx (mode);
7636 while (iterations--)
a6fc00da 7637 {
a6fc00da 7638 rtx x2 = gen_reg_rtx (mode);
98daafa0
EM
7639 emit_set_insn (x2, gen_rtx_MULT (mode, xdst, xdst));
7640
7641 emit_insn ((*get_rsqrts_type (mode)) (x1, src, x2));
a6fc00da 7642
98daafa0
EM
7643 if (iterations > 0)
7644 emit_set_insn (xdst, gen_rtx_MULT (mode, xdst, x1));
7645 }
7646
7647 if (!recp)
7648 {
7649 /* Qualify the approximate reciprocal square root when the argument is
7650 0.0 by squashing the intermediary result to 0.0. */
7651 rtx xtmp = gen_reg_rtx (mmsk);
7652 emit_set_insn (xtmp, gen_rtx_AND (mmsk, gen_rtx_NOT (mmsk, xmsk),
7653 gen_rtx_SUBREG (mmsk, xdst, 0)));
7654 emit_move_insn (xdst, gen_rtx_SUBREG (mode, xtmp, 0));
a6fc00da 7655
98daafa0
EM
7656 /* Calculate the approximate square root. */
7657 emit_set_insn (xdst, gen_rtx_MULT (mode, xdst, src));
a6fc00da
BH
7658 }
7659
98daafa0
EM
7660 /* Finalize the approximation. */
7661 emit_set_insn (dst, gen_rtx_MULT (mode, xdst, x1));
7662
7663 return true;
a6fc00da
BH
7664}
7665
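/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): a scalar C model of the sequence emitted above.
   FRSQRTE supplies a rough estimate and each FRSQRTS step performs one
   Newton-Raphson refinement x' = x * (3 - d*x*x) / 2; the bit trick below
   merely stands in for the hardware estimate. */

static float
example_rsqrt (float d)
{
  union { float f; unsigned int u; } v = { d };
  v.u = 0x5f3759dfu - (v.u >> 1);        /* crude estimate, like FRSQRTE */
  float x = v.f;
  for (int i = 0; i < 2; i++)            /* two steps for SF, three for DF */
    x = x * (3.0f - d * x * x) * 0.5f;   /* one FRSQRTS + FMUL step */
  return x;                              /* approximates 1 / sqrtf (d) */
}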
79a2bc2d
EM
7666typedef rtx (*recpe_type) (rtx, rtx);
7667
7668/* Select reciprocal initial estimate insn depending on machine mode. */
7669
7670static recpe_type
7671get_recpe_type (machine_mode mode)
7672{
7673 switch (mode)
7674 {
7675 case SFmode: return (gen_aarch64_frecpesf);
7676 case V2SFmode: return (gen_aarch64_frecpev2sf);
7677 case V4SFmode: return (gen_aarch64_frecpev4sf);
7678 case DFmode: return (gen_aarch64_frecpedf);
7679 case V2DFmode: return (gen_aarch64_frecpev2df);
7680 default: gcc_unreachable ();
7681 }
7682}
7683
7684typedef rtx (*recps_type) (rtx, rtx, rtx);
7685
7686/* Select reciprocal series step insn depending on machine mode. */
7687
7688static recps_type
7689get_recps_type (machine_mode mode)
7690{
7691 switch (mode)
7692 {
7693 case SFmode: return (gen_aarch64_frecpssf);
7694 case V2SFmode: return (gen_aarch64_frecpsv2sf);
7695 case V4SFmode: return (gen_aarch64_frecpsv4sf);
7696 case DFmode: return (gen_aarch64_frecpsdf);
7697 case V2DFmode: return (gen_aarch64_frecpsv2df);
7698 default: gcc_unreachable ();
7699 }
7700}
7701
7702/* Emit the instruction sequence to compute the approximation for the division
7703 of NUM by DEN in QUO and return whether the sequence was emitted or not. */
7704
7705bool
7706aarch64_emit_approx_div (rtx quo, rtx num, rtx den)
7707{
7708 machine_mode mode = GET_MODE (quo);
33d72b63
JW
7709
7710 if (GET_MODE_INNER (mode) == HFmode)
7711 return false;
7712
79a2bc2d
EM
7713 bool use_approx_division_p = (flag_mlow_precision_div
7714 || (aarch64_tune_params.approx_modes->division
7715 & AARCH64_APPROX_MODE (mode)));
7716
7717 if (!flag_finite_math_only
7718 || flag_trapping_math
7719 || !flag_unsafe_math_optimizations
7720 || optimize_function_for_size_p (cfun)
7721 || !use_approx_division_p)
7722 return false;
7723
7724 /* Estimate the approximate reciprocal. */
7725 rtx xrcp = gen_reg_rtx (mode);
7726 emit_insn ((*get_recpe_type (mode)) (xrcp, den));
7727
7728 /* Iterate over the series twice for SF and thrice for DF. */
7729 int iterations = (GET_MODE_INNER (mode) == DFmode) ? 3 : 2;
7730
7731 /* Optionally iterate over the series once less for faster performance,
7732 while sacrificing the accuracy. */
7733 if (flag_mlow_precision_div)
7734 iterations--;
7735
7736 /* Iterate over the series to calculate the approximate reciprocal. */
7737 rtx xtmp = gen_reg_rtx (mode);
7738 while (iterations--)
7739 {
7740 emit_insn ((*get_recps_type (mode)) (xtmp, xrcp, den));
7741
7742 if (iterations > 0)
7743 emit_set_insn (xrcp, gen_rtx_MULT (mode, xrcp, xtmp));
7744 }
7745
7746 if (num != CONST1_RTX (mode))
7747 {
7748 /* As the approximate reciprocal of DEN is already calculated, only
7749 calculate the approximate division when NUM is not 1.0. */
7750 rtx xnum = force_reg (mode, num);
7751 emit_set_insn (xrcp, gen_rtx_MULT (mode, xrcp, xnum));
7752 }
7753
7754 /* Finalize the approximation. */
7755 emit_set_insn (quo, gen_rtx_MULT (mode, xrcp, xtmp));
7756 return true;
7757}
7758
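/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): a scalar C model of the division sequence above.
   FRECPE estimates 1/den, each FRECPS step is a Newton-Raphson refinement
   x' = x * (2 - den*x), and the quotient is num * (1/den).  The exact
   initial value below just stands in for the hardware FRECPE estimate. */

static float
example_approx_div (float num, float den)
{
  float x = 1.0f / den;              /* stand-in for the FRECPE estimate */
  for (int i = 0; i < 2; i++)        /* two steps for SF, three for DF */
    x = x * (2.0f - den * x);        /* one FRECPS + FMUL step */
  return num * x;
}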
d126a4ae
AP
7759/* Return the number of instructions that can be issued per cycle. */
7760static int
7761aarch64_sched_issue_rate (void)
7762{
b175b679 7763 return aarch64_tune_params.issue_rate;
d126a4ae
AP
7764}
7765
d03f7e44
MK
7766static int
7767aarch64_sched_first_cycle_multipass_dfa_lookahead (void)
7768{
7769 int issue_rate = aarch64_sched_issue_rate ();
7770
7771 return issue_rate > 1 && !sched_fusion ? issue_rate : 0;
7772}
7773
2d6bc7fa
KT
7774
7775/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD as
7776 autopref_multipass_dfa_lookahead_guard from haifa-sched.c. It only
7777 has an effect if PARAM_SCHED_AUTOPREF_QUEUE_DEPTH > 0. */
7778
7779static int
7780aarch64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn,
7781 int ready_index)
7782{
7783 return autopref_multipass_dfa_lookahead_guard (insn, ready_index);
7784}
7785
7786
8990e73a
TB
7787/* Vectorizer cost model target hooks. */
7788
7789/* Implement targetm.vectorize.builtin_vectorization_cost. */
7790static int
7791aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
7792 tree vectype,
7793 int misalign ATTRIBUTE_UNUSED)
7794{
7795 unsigned elements;
7796
7797 switch (type_of_cost)
7798 {
7799 case scalar_stmt:
b175b679 7800 return aarch64_tune_params.vec_costs->scalar_stmt_cost;
8990e73a
TB
7801
7802 case scalar_load:
b175b679 7803 return aarch64_tune_params.vec_costs->scalar_load_cost;
8990e73a
TB
7804
7805 case scalar_store:
b175b679 7806 return aarch64_tune_params.vec_costs->scalar_store_cost;
8990e73a
TB
7807
7808 case vector_stmt:
b175b679 7809 return aarch64_tune_params.vec_costs->vec_stmt_cost;
8990e73a
TB
7810
7811 case vector_load:
b175b679 7812 return aarch64_tune_params.vec_costs->vec_align_load_cost;
8990e73a
TB
7813
7814 case vector_store:
b175b679 7815 return aarch64_tune_params.vec_costs->vec_store_cost;
8990e73a
TB
7816
7817 case vec_to_scalar:
b175b679 7818 return aarch64_tune_params.vec_costs->vec_to_scalar_cost;
8990e73a
TB
7819
7820 case scalar_to_vec:
b175b679 7821 return aarch64_tune_params.vec_costs->scalar_to_vec_cost;
8990e73a
TB
7822
7823 case unaligned_load:
b175b679 7824 return aarch64_tune_params.vec_costs->vec_unalign_load_cost;
8990e73a
TB
7825
7826 case unaligned_store:
b175b679 7827 return aarch64_tune_params.vec_costs->vec_unalign_store_cost;
8990e73a
TB
7828
7829 case cond_branch_taken:
b175b679 7830 return aarch64_tune_params.vec_costs->cond_taken_branch_cost;
8990e73a
TB
7831
7832 case cond_branch_not_taken:
b175b679 7833 return aarch64_tune_params.vec_costs->cond_not_taken_branch_cost;
8990e73a
TB
7834
7835 case vec_perm:
c428f91c
WD
7836 return aarch64_tune_params.vec_costs->vec_permute_cost;
7837
8990e73a 7838 case vec_promote_demote:
b175b679 7839 return aarch64_tune_params.vec_costs->vec_stmt_cost;
8990e73a
TB
7840
7841 case vec_construct:
7842 elements = TYPE_VECTOR_SUBPARTS (vectype);
7843 return elements / 2 + 1;
7844
7845 default:
7846 gcc_unreachable ();
7847 }
7848}
7849
7850/* Implement targetm.vectorize.add_stmt_cost. */
7851static unsigned
7852aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
7853 struct _stmt_vec_info *stmt_info, int misalign,
7854 enum vect_cost_model_location where)
7855{
7856 unsigned *cost = (unsigned *) data;
7857 unsigned retval = 0;
7858
7859 if (flag_vect_cost_model)
7860 {
7861 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
7862 int stmt_cost =
7863 aarch64_builtin_vectorization_cost (kind, vectype, misalign);
7864
7865 /* Statements in an inner loop relative to the loop being
7866 vectorized are weighted more heavily. The value here is
058e4c71 7867 arbitrary and could potentially be improved with analysis. */
8990e73a 7868 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
058e4c71 7869 count *= 50; /* FIXME */
8990e73a
TB
7870
7871 retval = (unsigned) (count * stmt_cost);
7872 cost[where] += retval;
7873 }
7874
7875 return retval;
7876}
7877
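/* Worked example (editor's addition, not from aarch64.c), with
   hypothetical per-CPU costs: a vector_stmt of cost 1 inside the inner
   loop of a nested vectorized loop is recorded by the hook above as
   1 * 50 = 50 (the FIXME weighting), while a vec_construct of a V4SF
   vector (4 elements) is costed as 4 / 2 + 1 = 3. */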
0cfff2a1 7878static void initialize_aarch64_code_model (struct gcc_options *);
43e9d192 7879
0cfff2a1
KT
7880/* Parse the TO_PARSE string and put the architecture struct that it
7881 selects into RES and the architectural features into ISA_FLAGS.
7882 Return an aarch64_parse_opt_result describing the parse result.
7883 If there is an error parsing, RES and ISA_FLAGS are left unchanged. */
43e9d192 7884
0cfff2a1
KT
7885static enum aarch64_parse_opt_result
7886aarch64_parse_arch (const char *to_parse, const struct processor **res,
7887 unsigned long *isa_flags)
43e9d192
IB
7888{
7889 char *ext;
7890 const struct processor *arch;
0cfff2a1 7891 char *str = (char *) alloca (strlen (to_parse) + 1);
43e9d192
IB
7892 size_t len;
7893
0cfff2a1 7894 strcpy (str, to_parse);
43e9d192
IB
7895
7896 ext = strchr (str, '+');
7897
7898 if (ext != NULL)
7899 len = ext - str;
7900 else
7901 len = strlen (str);
7902
7903 if (len == 0)
0cfff2a1
KT
7904 return AARCH64_PARSE_MISSING_ARG;
7905
43e9d192 7906
0cfff2a1 7907 /* Loop through the list of supported ARCHes to find a match. */
43e9d192
IB
7908 for (arch = all_architectures; arch->name != NULL; arch++)
7909 {
7910 if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
7911 {
0cfff2a1 7912 unsigned long isa_temp = arch->flags;
43e9d192
IB
7913
7914 if (ext != NULL)
7915 {
0cfff2a1
KT
7916 /* TO_PARSE string contains at least one extension. */
7917 enum aarch64_parse_opt_result ext_res
7918 = aarch64_parse_extension (ext, &isa_temp);
43e9d192 7919
0cfff2a1
KT
7920 if (ext_res != AARCH64_PARSE_OK)
7921 return ext_res;
ffee7aa9 7922 }
0cfff2a1
KT
7923 /* Extension parsing was successful. Confirm the result
7924 arch and ISA flags. */
7925 *res = arch;
7926 *isa_flags = isa_temp;
7927 return AARCH64_PARSE_OK;
43e9d192
IB
7928 }
7929 }
7930
7931 /* ARCH name not found in list. */
0cfff2a1 7932 return AARCH64_PARSE_INVALID_ARG;
43e9d192
IB
7933}
7934
0cfff2a1
KT
7935/* Parse the TO_PARSE string and put the CPU struct that it selects into
 7936 RES and its ISA feature flags into ISA_FLAGS. Return an aarch64_parse_opt_result
7937 describing the parse result. If there is an error parsing, RES and
7938 ISA_FLAGS are left unchanged. */
43e9d192 7939
0cfff2a1
KT
7940static enum aarch64_parse_opt_result
7941aarch64_parse_cpu (const char *to_parse, const struct processor **res,
7942 unsigned long *isa_flags)
43e9d192
IB
7943{
7944 char *ext;
7945 const struct processor *cpu;
0cfff2a1 7946 char *str = (char *) alloca (strlen (to_parse) + 1);
43e9d192
IB
7947 size_t len;
7948
0cfff2a1 7949 strcpy (str, to_parse);
43e9d192
IB
7950
7951 ext = strchr (str, '+');
7952
7953 if (ext != NULL)
7954 len = ext - str;
7955 else
7956 len = strlen (str);
7957
7958 if (len == 0)
0cfff2a1
KT
7959 return AARCH64_PARSE_MISSING_ARG;
7960
43e9d192
IB
7961
7962 /* Loop through the list of supported CPUs to find a match. */
7963 for (cpu = all_cores; cpu->name != NULL; cpu++)
7964 {
7965 if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
7966 {
0cfff2a1
KT
7967 unsigned long isa_temp = cpu->flags;
7968
43e9d192
IB
7969
7970 if (ext != NULL)
7971 {
0cfff2a1
KT
7972 /* TO_PARSE string contains at least one extension. */
7973 enum aarch64_parse_opt_result ext_res
7974 = aarch64_parse_extension (ext, &isa_temp);
43e9d192 7975
0cfff2a1
KT
7976 if (ext_res != AARCH64_PARSE_OK)
7977 return ext_res;
7978 }
7979 /* Extension parsing was successful. Confirm the result
7980 cpu and ISA flags. */
7981 *res = cpu;
7982 *isa_flags = isa_temp;
7983 return AARCH64_PARSE_OK;
43e9d192
IB
7984 }
7985 }
7986
7987 /* CPU name not found in list. */
0cfff2a1 7988 return AARCH64_PARSE_INVALID_ARG;
43e9d192
IB
7989}
7990
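/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical, and <string.h> is assumed, as system.h provides it in
   this file): the parsers above split at the first '+', so e.g.
   "cortex-a57+crypto" yields the name "cortex-a57" and hands "+crypto"
   to aarch64_parse_extension. */

static const char *
example_split_cpu_string (const char *opt, size_t *name_len)
{
  const char *ext = strchr (opt, '+');              /* extensions or NULL */
  *name_len = ext ? (size_t) (ext - opt) : strlen (opt);
  return ext;
}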
0cfff2a1
KT
7991/* Parse the TO_PARSE string and put the cpu it selects into RES.
7992 Return an aarch64_parse_opt_result describing the parse result.
7993 If the parsing fails the RES does not change. */
43e9d192 7994
0cfff2a1
KT
7995static enum aarch64_parse_opt_result
7996aarch64_parse_tune (const char *to_parse, const struct processor **res)
43e9d192
IB
7997{
7998 const struct processor *cpu;
0cfff2a1
KT
7999 char *str = (char *) alloca (strlen (to_parse) + 1);
8000
8001 strcpy (str, to_parse);
43e9d192
IB
8002
8003 /* Loop through the list of supported CPUs to find a match. */
8004 for (cpu = all_cores; cpu->name != NULL; cpu++)
8005 {
8006 if (strcmp (cpu->name, str) == 0)
8007 {
0cfff2a1
KT
8008 *res = cpu;
8009 return AARCH64_PARSE_OK;
43e9d192
IB
8010 }
8011 }
8012
8013 /* CPU name not found in list. */
0cfff2a1 8014 return AARCH64_PARSE_INVALID_ARG;
43e9d192
IB
8015}
8016
8dec06f2
JG
8017/* Parse TOKEN, which has length LENGTH, to see if it is an option
8018 described in FLAG. If it is, return the index bit for that fusion type.
8019 If not, error (printing OPTION_NAME) and return zero. */
8020
8021static unsigned int
8022aarch64_parse_one_option_token (const char *token,
8023 size_t length,
8024 const struct aarch64_flag_desc *flag,
8025 const char *option_name)
8026{
8027 for (; flag->name != NULL; flag++)
8028 {
8029 if (length == strlen (flag->name)
8030 && !strncmp (flag->name, token, length))
8031 return flag->flag;
8032 }
8033
8034 error ("unknown flag passed in -moverride=%s (%s)", option_name, token);
8035 return 0;
8036}
8037
8038/* Parse OPTION which is a comma-separated list of flags to enable.
8039 FLAGS gives the list of flags we understand, INITIAL_STATE gives any
8040 default state we inherit from the CPU tuning structures. OPTION_NAME
8041 gives the top-level option we are parsing in the -moverride string,
8042 for use in error messages. */
8043
8044static unsigned int
8045aarch64_parse_boolean_options (const char *option,
8046 const struct aarch64_flag_desc *flags,
8047 unsigned int initial_state,
8048 const char *option_name)
8049{
8050 const char separator = '.';
8051 const char* specs = option;
8052 const char* ntoken = option;
8053 unsigned int found_flags = initial_state;
8054
8055 while ((ntoken = strchr (specs, separator)))
8056 {
8057 size_t token_length = ntoken - specs;
8058 unsigned token_ops = aarch64_parse_one_option_token (specs,
8059 token_length,
8060 flags,
8061 option_name);
8062 /* If we find "none" (or, for simplicity's sake, an error) anywhere
8063 in the token stream, reset the supported operations. So:
8064
8065 adrp+add.cmp+branch.none.adrp+add
8066
8067 would have the result of turning on only adrp+add fusion. */
8068 if (!token_ops)
8069 found_flags = 0;
8070
8071 found_flags |= token_ops;
8072 specs = ++ntoken;
8073 }
8074
8075 /* The string ended with a trailing separator, which is ill-formed. */
8076 if (!(*specs))
8077 {
8078 error ("%s string ill-formed\n", option_name);
8079 return 0;
8080 }
8081
8082 /* We still have one more token to parse. */
8083 size_t token_length = strlen (specs);
8084 unsigned token_ops = aarch64_parse_one_option_token (specs,
8085 token_length,
8086 flags,
8087 option_name);
8088 if (!token_ops)
8089 found_flags = 0;
8090
8091 found_flags |= token_ops;
8092 return found_flags;
8093}
8094
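/* Worked example (editor's addition, not from aarch64.c) of the
   '.'-separated format accepted above: "adrp+add.cmp+branch" enables both
   fusion pairs, whereas "adrp+add.none.cmp+branch" enables only cmp+branch,
   because "none" (like any unrecognised token) clears the flags accumulated
   so far before parsing continues. */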
8095/* Support for overriding instruction fusion. */
8096
8097static void
8098aarch64_parse_fuse_string (const char *fuse_string,
8099 struct tune_params *tune)
8100{
8101 tune->fusible_ops = aarch64_parse_boolean_options (fuse_string,
8102 aarch64_fusible_pairs,
8103 tune->fusible_ops,
8104 "fuse=");
8105}
8106
8107/* Support for overriding other tuning flags. */
8108
8109static void
8110aarch64_parse_tune_string (const char *tune_string,
8111 struct tune_params *tune)
8112{
8113 tune->extra_tuning_flags
8114 = aarch64_parse_boolean_options (tune_string,
8115 aarch64_tuning_flags,
8116 tune->extra_tuning_flags,
8117 "tune=");
8118}
8119
8120/* Parse TOKEN, which has length LENGTH, to see if it is a tuning option
 8121 we understand. If it is, extract the option string and hand it off to
8122 the appropriate function. */
8123
8124void
8125aarch64_parse_one_override_token (const char* token,
8126 size_t length,
8127 struct tune_params *tune)
8128{
8129 const struct aarch64_tuning_override_function *fn
8130 = aarch64_tuning_override_functions;
8131
8132 const char *option_part = strchr (token, '=');
8133 if (!option_part)
8134 {
8135 error ("tuning string missing in option (%s)", token);
8136 return;
8137 }
8138
8139 /* Get the length of the option name. */
8140 length = option_part - token;
8141 /* Skip the '=' to get to the option string. */
8142 option_part++;
8143
8144 for (; fn->name != NULL; fn++)
8145 {
8146 if (!strncmp (fn->name, token, length))
8147 {
8148 fn->parse_override (option_part, tune);
8149 return;
8150 }
8151 }
8152
8153 error ("unknown tuning option (%s)",token);
8154 return;
8155}
8156
5eee3c34
JW
8157/* A checking mechanism for the implementation of the tls size. */
8158
8159static void
8160initialize_aarch64_tls_size (struct gcc_options *opts)
8161{
8162 if (aarch64_tls_size == 0)
8163 aarch64_tls_size = 24;
8164
8165 switch (opts->x_aarch64_cmodel_var)
8166 {
8167 case AARCH64_CMODEL_TINY:
8168 /* Both the default and the maximum TLS size allowed under tiny are 1M,
 8169 which needs two instructions to address, so we clamp the size to 24. */
8170 if (aarch64_tls_size > 24)
8171 aarch64_tls_size = 24;
8172 break;
8173 case AARCH64_CMODEL_SMALL:
8174 /* The maximum TLS size allowed under small is 4G. */
8175 if (aarch64_tls_size > 32)
8176 aarch64_tls_size = 32;
8177 break;
8178 case AARCH64_CMODEL_LARGE:
8179 /* The maximum TLS size allowed under large is 16E.
8180 FIXME: 16E should be 64bit, we only support 48bit offset now. */
8181 if (aarch64_tls_size > 48)
8182 aarch64_tls_size = 48;
8183 break;
8184 default:
8185 gcc_unreachable ();
8186 }
8187
8188 return;
8189}
8190
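/* Worked example (editor's addition, not from aarch64.c): in terms of
   -mtls-size, the clamping above means a request of 48 bits is reduced to
   24 under -mcmodel=tiny (1M of TLS), to 32 under -mcmodel=small (4G), and
   left at 48 under -mcmodel=large, the largest offset currently supported. */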
8dec06f2
JG
8191/* Parse STRING looking for options in the format:
8192 string :: option:string
8193 option :: name=substring
8194 name :: {a-z}
8195 substring :: defined by option. */
8196
8197static void
8198aarch64_parse_override_string (const char* input_string,
8199 struct tune_params* tune)
8200{
8201 const char separator = ':';
8202 size_t string_length = strlen (input_string) + 1;
8203 char *string_root = (char *) xmalloc (sizeof (*string_root) * string_length);
8204 char *string = string_root;
8205 strncpy (string, input_string, string_length);
8206 string[string_length - 1] = '\0';
8207
8208 char* ntoken = string;
8209
8210 while ((ntoken = strchr (string, separator)))
8211 {
8212 size_t token_length = ntoken - string;
8213 /* Make this substring look like a string. */
8214 *ntoken = '\0';
8215 aarch64_parse_one_override_token (string, token_length, tune);
8216 string = ++ntoken;
8217 }
8218
8219 /* One last option to parse. */
8220 aarch64_parse_one_override_token (string, strlen (string), tune);
8221 free (string_root);
8222}
43e9d192 8223
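/* Worked example (editor's addition, not from aarch64.c) of the
   ':'-separated format parsed above, as it appears on the command line:

     -moverride=fuse=adrp+add.cmp+branch:tune=<flag-list>

   where the fuse= substring is handled by aarch64_parse_fuse_string and
   the tune= substring by aarch64_parse_tune_string ("<flag-list>" stands
   for a '.'-separated list of tuning flag names). */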
43e9d192
IB
8224
8225static void
0cfff2a1 8226aarch64_override_options_after_change_1 (struct gcc_options *opts)
43e9d192 8227{
a3dc8760
NC
8228 /* The logic here is that if we are disabling all frame pointer generation
8229 then we do not need to disable leaf frame pointer generation as a
8230 separate operation. But if we are *only* disabling leaf frame pointer
8231 generation then we set flag_omit_frame_pointer to true, but in
8232 aarch64_frame_pointer_required we return false only for leaf functions.
8233
8234 PR 70044: We have to be careful about being called multiple times for the
8235 same function. Once we have decided to set flag_omit_frame_pointer just
8236 so that we can omit leaf frame pointers, we must then not interpret a
8237 second call as meaning that all frame pointer generation should be
8238 omitted. We do this by setting flag_omit_frame_pointer to a special,
8239 non-zero value. */
8240 if (opts->x_flag_omit_frame_pointer == 2)
8241 opts->x_flag_omit_frame_pointer = 0;
8242
0cfff2a1
KT
8243 if (opts->x_flag_omit_frame_pointer)
8244 opts->x_flag_omit_leaf_frame_pointer = false;
8245 else if (opts->x_flag_omit_leaf_frame_pointer)
a3dc8760 8246 opts->x_flag_omit_frame_pointer = 2;
43e9d192 8247
1be34295 8248 /* If not optimizing for size, set the default
0cfff2a1
KT
8249 alignment to what the target wants. */
8250 if (!opts->x_optimize_size)
43e9d192 8251 {
0cfff2a1
KT
8252 if (opts->x_align_loops <= 0)
8253 opts->x_align_loops = aarch64_tune_params.loop_align;
8254 if (opts->x_align_jumps <= 0)
8255 opts->x_align_jumps = aarch64_tune_params.jump_align;
8256 if (opts->x_align_functions <= 0)
8257 opts->x_align_functions = aarch64_tune_params.function_align;
43e9d192 8258 }
b4f50fd4 8259
9ee6540a
WD
8260 /* We default to no pc-relative literal loads. */
8261
8262 aarch64_pcrelative_literal_loads = false;
8263
8264 /* If -mpc-relative-literal-loads is set on the command line, this
b4f50fd4 8265 implies that the user asked for PC relative literal loads. */
9ee6540a
WD
8266 if (opts->x_pcrelative_literal_loads == 1)
8267 aarch64_pcrelative_literal_loads = true;
b4f50fd4 8268
48bb1a55
CL
8269 /* This is PR70113. When building the Linux kernel with
8270 CONFIG_ARM64_ERRATUM_843419, support for relocations
8271 R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADR_PREL_PG_HI21_NC is
8272 removed from the kernel to avoid loading objects with possibly
9ee6540a 8273 offending sequences. Without -mpc-relative-literal-loads we would
48bb1a55
CL
8274 generate such relocations, preventing the kernel build from
8275 succeeding. */
9ee6540a
WD
8276 if (opts->x_pcrelative_literal_loads == 2
8277 && TARGET_FIX_ERR_A53_843419)
8278 aarch64_pcrelative_literal_loads = true;
8279
8280 /* In the tiny memory model it makes no sense to disallow PC relative
8281 literal pool loads. */
8282 if (aarch64_cmodel == AARCH64_CMODEL_TINY
8283 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
8284 aarch64_pcrelative_literal_loads = true;
98daafa0
EM
8285
8286 /* When enabling the lower precision Newton series for the square root, also
8287 enable it for the reciprocal square root, since the latter is an
8288 intermediary step for the former. */
8289 if (flag_mlow_precision_sqrt)
8290 flag_mrecip_low_precision_sqrt = true;
0cfff2a1 8291}
43e9d192 8292
0cfff2a1
KT
8293/* 'Unpack' up the internal tuning structs and update the options
8294 in OPTS. The caller must have set up selected_tune and selected_arch
8295 as all the other target-specific codegen decisions are
8296 derived from them. */
8297
e4ea20c8 8298void
0cfff2a1
KT
8299aarch64_override_options_internal (struct gcc_options *opts)
8300{
8301 aarch64_tune_flags = selected_tune->flags;
8302 aarch64_tune = selected_tune->sched_core;
8303 /* Make a copy of the tuning parameters attached to the core, which
8304 we may later overwrite. */
8305 aarch64_tune_params = *(selected_tune->tune);
8306 aarch64_architecture_version = selected_arch->architecture_version;
8307
8308 if (opts->x_aarch64_override_tune_string)
8309 aarch64_parse_override_string (opts->x_aarch64_override_tune_string,
8310 &aarch64_tune_params);
8311
8312 /* This target defaults to strict volatile bitfields. */
8313 if (opts->x_flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
8314 opts->x_flag_strict_volatile_bitfields = 1;
8315
0cfff2a1 8316 initialize_aarch64_code_model (opts);
5eee3c34 8317 initialize_aarch64_tls_size (opts);
63892fa2 8318
2d6bc7fa
KT
8319 int queue_depth = 0;
8320 switch (aarch64_tune_params.autoprefetcher_model)
8321 {
8322 case tune_params::AUTOPREFETCHER_OFF:
8323 queue_depth = -1;
8324 break;
8325 case tune_params::AUTOPREFETCHER_WEAK:
8326 queue_depth = 0;
8327 break;
8328 case tune_params::AUTOPREFETCHER_STRONG:
8329 queue_depth = max_insn_queue_index + 1;
8330 break;
8331 default:
8332 gcc_unreachable ();
8333 }
8334
8335 /* We don't mind passing in global_options_set here as we don't use
8336 the *options_set structs anyway. */
8337 maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
8338 queue_depth,
8339 opts->x_param_values,
8340 global_options_set.x_param_values);
8341
50487d79
EM
8342 /* Set the L1 cache line size. */
8343 if (selected_cpu->tune->cache_line_size != 0)
8344 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
8345 selected_cpu->tune->cache_line_size,
8346 opts->x_param_values,
8347 global_options_set.x_param_values);
8348
0cfff2a1
KT
8349 aarch64_override_options_after_change_1 (opts);
8350}
43e9d192 8351
01f44038
KT
8352/* Print a hint with a suggestion for a core or architecture name that
8353 most closely resembles what the user passed in STR. ARCH is true if
8354 the user is asking for an architecture name. ARCH is false if the user
8355 is asking for a core name. */
8356
8357static void
8358aarch64_print_hint_for_core_or_arch (const char *str, bool arch)
8359{
8360 auto_vec<const char *> candidates;
8361 const struct processor *entry = arch ? all_architectures : all_cores;
8362 for (; entry->name != NULL; entry++)
8363 candidates.safe_push (entry->name);
8364 char *s;
8365 const char *hint = candidates_list_and_hint (str, s, candidates);
8366 if (hint)
8367 inform (input_location, "valid arguments are: %s;"
8368 " did you mean %qs?", s, hint);
8369 XDELETEVEC (s);
8370}
8371
8372/* Print a hint with a suggestion for a core name that most closely resembles
8373 what the user passed in STR. */
8374
8375inline static void
8376aarch64_print_hint_for_core (const char *str)
8377{
8378 aarch64_print_hint_for_core_or_arch (str, false);
8379}
8380
8381/* Print a hint with a suggestion for an architecture name that most closely
8382 resembles what the user passed in STR. */
8383
8384inline static void
8385aarch64_print_hint_for_arch (const char *str)
8386{
8387 aarch64_print_hint_for_core_or_arch (str, true);
8388}
8389
0cfff2a1
KT
8390/* Validate a command-line -mcpu option. Parse the cpu and extensions (if any)
8391 specified in STR and throw errors if appropriate. Put the results if
361fb3ee
KT
8392 they are valid in RES and ISA_FLAGS. Return whether the option is
8393 valid. */
43e9d192 8394
361fb3ee 8395static bool
0cfff2a1
KT
8396aarch64_validate_mcpu (const char *str, const struct processor **res,
8397 unsigned long *isa_flags)
8398{
8399 enum aarch64_parse_opt_result parse_res
8400 = aarch64_parse_cpu (str, res, isa_flags);
8401
8402 if (parse_res == AARCH64_PARSE_OK)
361fb3ee 8403 return true;
0cfff2a1
KT
8404
8405 switch (parse_res)
8406 {
8407 case AARCH64_PARSE_MISSING_ARG:
8408 error ("missing cpu name in -mcpu=%qs", str);
8409 break;
8410 case AARCH64_PARSE_INVALID_ARG:
8411 error ("unknown value %qs for -mcpu", str);
01f44038 8412 aarch64_print_hint_for_core (str);
0cfff2a1
KT
8413 break;
8414 case AARCH64_PARSE_INVALID_FEATURE:
8415 error ("invalid feature modifier in -mcpu=%qs", str);
8416 break;
8417 default:
8418 gcc_unreachable ();
8419 }
361fb3ee
KT
8420
8421 return false;
0cfff2a1
KT
8422}
8423
8424/* Validate a command-line -march option. Parse the arch and extensions
8425 (if any) specified in STR and throw errors if appropriate. Put the
361fb3ee
KT
8426 results, if they are valid, in RES and ISA_FLAGS. Return whether the
8427 option is valid. */
0cfff2a1 8428
361fb3ee 8429static bool
0cfff2a1 8430aarch64_validate_march (const char *str, const struct processor **res,
01f44038 8431 unsigned long *isa_flags)
0cfff2a1
KT
8432{
8433 enum aarch64_parse_opt_result parse_res
8434 = aarch64_parse_arch (str, res, isa_flags);
8435
8436 if (parse_res == AARCH64_PARSE_OK)
361fb3ee 8437 return true;
0cfff2a1
KT
8438
8439 switch (parse_res)
8440 {
8441 case AARCH64_PARSE_MISSING_ARG:
8442 error ("missing arch name in -march=%qs", str);
8443 break;
8444 case AARCH64_PARSE_INVALID_ARG:
8445 error ("unknown value %qs for -march", str);
01f44038 8446 aarch64_print_hint_for_arch (str);
0cfff2a1
KT
8447 break;
8448 case AARCH64_PARSE_INVALID_FEATURE:
8449 error ("invalid feature modifier in -march=%qs", str);
8450 break;
8451 default:
8452 gcc_unreachable ();
8453 }
361fb3ee
KT
8454
8455 return false;
0cfff2a1
KT
8456}
8457
8458/* Validate a command-line -mtune option. Parse the cpu
8459 specified in STR and throw errors if appropriate. Put the
361fb3ee
KT
8460 result, if it is valid, in RES. Return whether the option is
8461 valid. */
0cfff2a1 8462
361fb3ee 8463static bool
0cfff2a1
KT
8464aarch64_validate_mtune (const char *str, const struct processor **res)
8465{
8466 enum aarch64_parse_opt_result parse_res
8467 = aarch64_parse_tune (str, res);
8468
8469 if (parse_res == AARCH64_PARSE_OK)
361fb3ee 8470 return true;
0cfff2a1
KT
8471
8472 switch (parse_res)
8473 {
8474 case AARCH64_PARSE_MISSING_ARG:
8475 error ("missing cpu name in -mtune=%qs", str);
8476 break;
8477 case AARCH64_PARSE_INVALID_ARG:
8478 error ("unknown value %qs for -mtune", str);
01f44038 8479 aarch64_print_hint_for_core (str);
0cfff2a1
KT
8480 break;
8481 default:
8482 gcc_unreachable ();
8483 }
361fb3ee
KT
8484 return false;
8485}
8486
8487/* Return the CPU corresponding to the enum CPU.
8488 If it doesn't specify a cpu, return the default. */
8489
8490static const struct processor *
8491aarch64_get_tune_cpu (enum aarch64_processor cpu)
8492{
8493 if (cpu != aarch64_none)
8494 return &all_cores[cpu];
8495
8496 /* The & 0x3f is to extract the bottom 6 bits that encode the
8497 default cpu as selected by the --with-cpu GCC configure option
8498 in config.gcc.
8499 ???: The whole TARGET_CPU_DEFAULT and AARCH64_CPU_DEFAULT_FLAGS
8500 flags mechanism should be reworked to make it more sane. */
8501 return &all_cores[TARGET_CPU_DEFAULT & 0x3f];
8502}
8503
8504/* Return the architecture corresponding to the enum ARCH.
8505 If it doesn't specify a valid architecture, return the default. */
8506
8507static const struct processor *
8508aarch64_get_arch (enum aarch64_arch arch)
8509{
8510 if (arch != aarch64_no_arch)
8511 return &all_architectures[arch];
8512
8513 const struct processor *cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
8514
8515 return &all_architectures[cpu->arch];
0cfff2a1
KT
8516}
8517
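/* Illustrative sketch (editor's addition, not from aarch64.c; example_*
   is hypothetical): the & 0x3f here and the >> 6 in aarch64_override_options
   treat TARGET_CPU_DEFAULT as a packed value, with the low 6 bits holding
   the default cpu identifier and the remaining bits its ISA flags. */

static void
example_decode_cpu_default (unsigned long packed,
                            unsigned int *cpu_ident,
                            unsigned long *isa_flags)
{
  *cpu_ident = (unsigned int) (packed & 0x3f);  /* index into all_cores */
  *isa_flags = packed >> 6;                     /* default ISA flag bits */
}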
8518/* Implement TARGET_OPTION_OVERRIDE. This is called once in the beginning
8519 and is used to parse the -m{cpu,tune,arch} strings and setup the initial
8520 tuning structs. In particular it must set selected_tune and
8521 aarch64_isa_flags that define the available ISA features and tuning
8522 decisions. It must also set selected_arch as this will be used to
8523 output the .arch asm tags for each function. */
8524
8525static void
8526aarch64_override_options (void)
8527{
8528 unsigned long cpu_isa = 0;
8529 unsigned long arch_isa = 0;
8530 aarch64_isa_flags = 0;
8531
361fb3ee
KT
8532 bool valid_cpu = true;
8533 bool valid_tune = true;
8534 bool valid_arch = true;
8535
0cfff2a1
KT
8536 selected_cpu = NULL;
8537 selected_arch = NULL;
8538 selected_tune = NULL;
8539
8540 /* -mcpu=CPU is shorthand for -march=ARCH_FOR_CPU, -mtune=CPU.
8541 If either of -march or -mtune is given, they override their
8542 respective component of -mcpu. */
8543 if (aarch64_cpu_string)
361fb3ee
KT
8544 valid_cpu = aarch64_validate_mcpu (aarch64_cpu_string, &selected_cpu,
8545 &cpu_isa);
0cfff2a1
KT
8546
8547 if (aarch64_arch_string)
361fb3ee
KT
8548 valid_arch = aarch64_validate_march (aarch64_arch_string, &selected_arch,
8549 &arch_isa);
0cfff2a1
KT
8550
8551 if (aarch64_tune_string)
361fb3ee 8552 valid_tune = aarch64_validate_mtune (aarch64_tune_string, &selected_tune);
43e9d192
IB
8553
8554 /* If the user did not specify a processor, choose the default
8555 one for them. This will be the CPU set during configuration using
a3cd0246 8556 --with-cpu, otherwise it is "generic". */
43e9d192
IB
8557 if (!selected_cpu)
8558 {
0cfff2a1
KT
8559 if (selected_arch)
8560 {
8561 selected_cpu = &all_cores[selected_arch->ident];
8562 aarch64_isa_flags = arch_isa;
361fb3ee 8563 explicit_arch = selected_arch->arch;
0cfff2a1
KT
8564 }
8565 else
8566 {
361fb3ee
KT
8567 /* Get default configure-time CPU. */
8568 selected_cpu = aarch64_get_tune_cpu (aarch64_none);
0cfff2a1
KT
8569 aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
8570 }
361fb3ee
KT
8571
8572 if (selected_tune)
8573 explicit_tune_core = selected_tune->ident;
0cfff2a1
KT
8574 }
8575 /* If both -mcpu and -march are specified check that they are architecturally
8576 compatible, warn if they're not and prefer the -march ISA flags. */
8577 else if (selected_arch)
8578 {
8579 if (selected_arch->arch != selected_cpu->arch)
8580 {
8581 warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
8582 all_architectures[selected_cpu->arch].name,
8583 selected_arch->name);
8584 }
8585 aarch64_isa_flags = arch_isa;
361fb3ee
KT
8586 explicit_arch = selected_arch->arch;
8587 explicit_tune_core = selected_tune ? selected_tune->ident
8588 : selected_cpu->ident;
0cfff2a1
KT
8589 }
8590 else
8591 {
8592 /* -mcpu but no -march. */
8593 aarch64_isa_flags = cpu_isa;
361fb3ee
KT
8594 explicit_tune_core = selected_tune ? selected_tune->ident
8595 : selected_cpu->ident;
8596 gcc_assert (selected_cpu);
8597 selected_arch = &all_architectures[selected_cpu->arch];
8598 explicit_arch = selected_arch->arch;
43e9d192
IB
8599 }
8600
0cfff2a1
KT
8601 /* Set the arch as well, as we will need it when outputting
 8602 the .arch directive in assembly. */
8603 if (!selected_arch)
8604 {
8605 gcc_assert (selected_cpu);
8606 selected_arch = &all_architectures[selected_cpu->arch];
8607 }
43e9d192 8608
43e9d192 8609 if (!selected_tune)
3edaf26d 8610 selected_tune = selected_cpu;
43e9d192 8611
0cfff2a1
KT
8612#ifndef HAVE_AS_MABI_OPTION
8613 /* The compiler may have been configured with 2.23.* binutils, which does
8614 not have support for ILP32. */
8615 if (TARGET_ILP32)
8616 error ("Assembler does not support -mabi=ilp32");
8617#endif
43e9d192 8618
361fb3ee
KT
8619 /* Make sure we properly set up the explicit options. */
8620 if ((aarch64_cpu_string && valid_cpu)
8621 || (aarch64_tune_string && valid_tune))
8622 gcc_assert (explicit_tune_core != aarch64_none);
8623
8624 if ((aarch64_cpu_string && valid_cpu)
8625 || (aarch64_arch_string && valid_arch))
8626 gcc_assert (explicit_arch != aarch64_no_arch);
8627
0cfff2a1
KT
8628 aarch64_override_options_internal (&global_options);
8629
8630 /* Save these options as the default ones in case we push and pop them later
8631 while processing functions with potential target attributes. */
8632 target_option_default_node = target_option_current_node
8633 = build_target_option_node (&global_options);
43e9d192
IB
8634}
8635
8636/* Implement targetm.override_options_after_change. */
8637
8638static void
8639aarch64_override_options_after_change (void)
8640{
0cfff2a1 8641 aarch64_override_options_after_change_1 (&global_options);
43e9d192
IB
8642}
8643
8644static struct machine_function *
8645aarch64_init_machine_status (void)
8646{
8647 struct machine_function *machine;
766090c2 8648 machine = ggc_cleared_alloc<machine_function> ();
43e9d192
IB
8649 return machine;
8650}
8651
8652void
8653aarch64_init_expanders (void)
8654{
8655 init_machine_status = aarch64_init_machine_status;
8656}
8657
8658/* A checking mechanism for the implementation of the various code models. */
8659static void
0cfff2a1 8660initialize_aarch64_code_model (struct gcc_options *opts)
43e9d192 8661{
0cfff2a1 8662 if (opts->x_flag_pic)
43e9d192 8663 {
0cfff2a1 8664 switch (opts->x_aarch64_cmodel_var)
43e9d192
IB
8665 {
8666 case AARCH64_CMODEL_TINY:
8667 aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
8668 break;
8669 case AARCH64_CMODEL_SMALL:
34ecdb0f 8670#ifdef HAVE_AS_SMALL_PIC_RELOCS
1b1e81f8
JW
8671 aarch64_cmodel = (flag_pic == 2
8672 ? AARCH64_CMODEL_SMALL_PIC
8673 : AARCH64_CMODEL_SMALL_SPIC);
34ecdb0f
JW
8674#else
8675 aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
8676#endif
43e9d192
IB
8677 break;
8678 case AARCH64_CMODEL_LARGE:
8679 sorry ("code model %qs with -f%s", "large",
0cfff2a1 8680 opts->x_flag_pic > 1 ? "PIC" : "pic");
1c652781 8681 break;
43e9d192
IB
8682 default:
8683 gcc_unreachable ();
8684 }
8685 }
8686 else
0cfff2a1 8687 aarch64_cmodel = opts->x_aarch64_cmodel_var;
43e9d192
IB
8688}
8689
361fb3ee
KT
8690/* Implement TARGET_OPTION_SAVE. */
8691
8692static void
8693aarch64_option_save (struct cl_target_option *ptr, struct gcc_options *opts)
8694{
8695 ptr->x_aarch64_override_tune_string = opts->x_aarch64_override_tune_string;
8696}
8697
8698/* Implements TARGET_OPTION_RESTORE. Restore the backend codegen decisions
8699 using the information saved in PTR. */
8700
8701static void
8702aarch64_option_restore (struct gcc_options *opts, struct cl_target_option *ptr)
8703{
8704 opts->x_explicit_tune_core = ptr->x_explicit_tune_core;
8705 selected_tune = aarch64_get_tune_cpu (ptr->x_explicit_tune_core);
8706 opts->x_explicit_arch = ptr->x_explicit_arch;
8707 selected_arch = aarch64_get_arch (ptr->x_explicit_arch);
8708 opts->x_aarch64_override_tune_string = ptr->x_aarch64_override_tune_string;
8709
8710 aarch64_override_options_internal (opts);
8711}
8712
8713/* Implement TARGET_OPTION_PRINT. */
8714
8715static void
8716aarch64_option_print (FILE *file, int indent, struct cl_target_option *ptr)
8717{
8718 const struct processor *cpu
8719 = aarch64_get_tune_cpu (ptr->x_explicit_tune_core);
8720 unsigned long isa_flags = ptr->x_aarch64_isa_flags;
8721 const struct processor *arch = aarch64_get_arch (ptr->x_explicit_arch);
054b4005 8722 std::string extension
04a99ebe 8723 = aarch64_get_extension_string_for_isa_flags (isa_flags, arch->flags);
361fb3ee
KT
8724
8725 fprintf (file, "%*sselected tune = %s\n", indent, "", cpu->name);
054b4005
JG
8726 fprintf (file, "%*sselected arch = %s%s\n", indent, "",
8727 arch->name, extension.c_str ());
361fb3ee
KT
8728}
8729
d78006d9
KT
8730static GTY(()) tree aarch64_previous_fndecl;
8731
e4ea20c8
KT
8732void
8733aarch64_reset_previous_fndecl (void)
8734{
8735 aarch64_previous_fndecl = NULL;
8736}
8737
acfc1ac1
KT
8738/* Restore or save the TREE_TARGET_GLOBALS from or to NEW_TREE.
8739 Used by aarch64_set_current_function and aarch64_pragma_target_parse to
8740 make sure optab availability predicates are recomputed when necessary. */
8741
8742void
8743aarch64_save_restore_target_globals (tree new_tree)
8744{
8745 if (TREE_TARGET_GLOBALS (new_tree))
8746 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
8747 else if (new_tree == target_option_default_node)
8748 restore_target_globals (&default_target_globals);
8749 else
8750 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
8751}
8752
d78006d9
KT
8753/* Implement TARGET_SET_CURRENT_FUNCTION. Unpack the codegen decisions
8754 like tuning and ISA features from the DECL_FUNCTION_SPECIFIC_TARGET
8755 of the function, if such exists. This function may be called multiple
8756 times on a single function so use aarch64_previous_fndecl to avoid
8757 setting up identical state. */
8758
8759static void
8760aarch64_set_current_function (tree fndecl)
8761{
acfc1ac1
KT
8762 if (!fndecl || fndecl == aarch64_previous_fndecl)
8763 return;
8764
d78006d9
KT
8765 tree old_tree = (aarch64_previous_fndecl
8766 ? DECL_FUNCTION_SPECIFIC_TARGET (aarch64_previous_fndecl)
8767 : NULL_TREE);
8768
acfc1ac1 8769 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
d78006d9 8770
acfc1ac1
KT
8771 /* If current function has no attributes but the previous one did,
8772 use the default node. */
8773 if (!new_tree && old_tree)
8774 new_tree = target_option_default_node;
d78006d9 8775
acfc1ac1
KT
8776 /* If nothing to do, return. #pragma GCC reset or #pragma GCC pop to
8777 the default have been handled by aarch64_save_restore_target_globals from
8778 aarch64_pragma_target_parse. */
8779 if (old_tree == new_tree)
8780 return;
d78006d9 8781
acfc1ac1 8782 aarch64_previous_fndecl = fndecl;
6e17a23b 8783
acfc1ac1
KT
8784 /* First set the target options. */
8785 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
6e17a23b 8786
acfc1ac1 8787 aarch64_save_restore_target_globals (new_tree);
d78006d9 8788}
361fb3ee 8789
5a2c8331
KT
8790/* Enum describing the various ways we can handle attributes.
8791 In many cases we can reuse the generic option handling machinery. */
8792
8793enum aarch64_attr_opt_type
8794{
8795 aarch64_attr_mask, /* Attribute should set a bit in target_flags. */
8796 aarch64_attr_bool, /* Attribute sets or unsets a boolean variable. */
8797 aarch64_attr_enum, /* Attribute sets an enum variable. */
8798 aarch64_attr_custom /* Attribute requires a custom handling function. */
8799};
8800
8801/* All the information needed to handle a target attribute.
8802 NAME is the name of the attribute.
9c582551 8803 ATTR_TYPE specifies the type of behavior of the attribute as described
5a2c8331
KT
8804 in the definition of enum aarch64_attr_opt_type.
8805 ALLOW_NEG is true if the attribute supports a "no-" form.
8806 HANDLER is the function that takes the attribute string and whether
8807 it is a pragma or attribute and handles the option. It is needed only
8808 when the ATTR_TYPE is aarch64_attr_custom.
8809 OPT_NUM is the enum specifying the option that the attribute modifies.
9c582551 8810 This is needed for attributes that mirror the behavior of a command-line
5a2c8331
KT
8811 option, that is, it has ATTR_TYPE aarch64_attr_mask, aarch64_attr_bool or
8812 aarch64_attr_enum. */
8813
8814struct aarch64_attribute_info
8815{
8816 const char *name;
8817 enum aarch64_attr_opt_type attr_type;
8818 bool allow_neg;
8819 bool (*handler) (const char *, const char *);
8820 enum opt_code opt_num;
8821};
8822
8823/* Handle the ARCH_STR argument to the arch= target attribute.
8824 PRAGMA_OR_ATTR is used in potential error messages. */
8825
8826static bool
8827aarch64_handle_attr_arch (const char *str, const char *pragma_or_attr)
8828{
8829 const struct processor *tmp_arch = NULL;
8830 enum aarch64_parse_opt_result parse_res
8831 = aarch64_parse_arch (str, &tmp_arch, &aarch64_isa_flags);
8832
8833 if (parse_res == AARCH64_PARSE_OK)
8834 {
8835 gcc_assert (tmp_arch);
8836 selected_arch = tmp_arch;
8837 explicit_arch = selected_arch->arch;
8838 return true;
8839 }
8840
8841 switch (parse_res)
8842 {
8843 case AARCH64_PARSE_MISSING_ARG:
8844 error ("missing architecture name in 'arch' target %s", pragma_or_attr);
8845 break;
8846 case AARCH64_PARSE_INVALID_ARG:
8847 error ("unknown value %qs for 'arch' target %s", str, pragma_or_attr);
01f44038 8848 aarch64_print_hint_for_arch (str);
5a2c8331
KT
8849 break;
8850 case AARCH64_PARSE_INVALID_FEATURE:
8851 error ("invalid feature modifier %qs for 'arch' target %s",
8852 str, pragma_or_attr);
8853 break;
8854 default:
8855 gcc_unreachable ();
8856 }
8857
8858 return false;
8859}
8860
8861/* Handle the argument CPU_STR to the cpu= target attribute.
8862 PRAGMA_OR_ATTR is used in potential error messages. */
8863
8864static bool
8865aarch64_handle_attr_cpu (const char *str, const char *pragma_or_attr)
8866{
8867 const struct processor *tmp_cpu = NULL;
8868 enum aarch64_parse_opt_result parse_res
8869 = aarch64_parse_cpu (str, &tmp_cpu, &aarch64_isa_flags);
8870
8871 if (parse_res == AARCH64_PARSE_OK)
8872 {
8873 gcc_assert (tmp_cpu);
8874 selected_tune = tmp_cpu;
8875 explicit_tune_core = selected_tune->ident;
8876
8877 selected_arch = &all_architectures[tmp_cpu->arch];
8878 explicit_arch = selected_arch->arch;
8879 return true;
8880 }
8881
8882 switch (parse_res)
8883 {
8884 case AARCH64_PARSE_MISSING_ARG:
8885 error ("missing cpu name in 'cpu' target %s", pragma_or_attr);
8886 break;
8887 case AARCH64_PARSE_INVALID_ARG:
8888 error ("unknown value %qs for 'cpu' target %s", str, pragma_or_attr);
01f44038 8889 aarch64_print_hint_for_core (str);
5a2c8331
KT
8890 break;
8891 case AARCH64_PARSE_INVALID_FEATURE:
8892 error ("invalid feature modifier %qs for 'cpu' target %s",
8893 str, pragma_or_attr);
8894 break;
8895 default:
8896 gcc_unreachable ();
8897 }
8898
8899 return false;
8900}
8901
8902/* Handle the argument STR to the tune= target attribute.
8903 PRAGMA_OR_ATTR is used in potential error messages. */
8904
8905static bool
8906aarch64_handle_attr_tune (const char *str, const char *pragma_or_attr)
8907{
8908 const struct processor *tmp_tune = NULL;
8909 enum aarch64_parse_opt_result parse_res
8910 = aarch64_parse_tune (str, &tmp_tune);
8911
8912 if (parse_res == AARCH64_PARSE_OK)
8913 {
8914 gcc_assert (tmp_tune);
8915 selected_tune = tmp_tune;
8916 explicit_tune_core = selected_tune->ident;
8917 return true;
8918 }
8919
8920 switch (parse_res)
8921 {
8922 case AARCH64_PARSE_INVALID_ARG:
8923 error ("unknown value %qs for 'tune' target %s", str, pragma_or_attr);
01f44038 8924 aarch64_print_hint_for_core (str);
5a2c8331
KT
8925 break;
8926 default:
8927 gcc_unreachable ();
8928 }
8929
8930 return false;
8931}
8932
8933/* Parse an architecture extensions target attribute string specified in STR.
8934 For example "+fp+nosimd". Show any errors if needed. Return TRUE
8935 if successful. Update aarch64_isa_flags to reflect the ISA features
8936 modified.
8937 PRAGMA_OR_ATTR is used in potential error messages. */
8938
8939static bool
8940aarch64_handle_attr_isa_flags (char *str, const char *pragma_or_attr)
8941{
8942 enum aarch64_parse_opt_result parse_res;
8943 unsigned long isa_flags = aarch64_isa_flags;
8944
e4ea20c8
KT
8945 /* We allow "+nothing" in the beginning to clear out all architectural
8946 features if the user wants to handpick specific features. */
8947 if (strncmp ("+nothing", str, 8) == 0)
8948 {
8949 isa_flags = 0;
8950 str += 8;
8951 }
8952
5a2c8331
KT
8953 parse_res = aarch64_parse_extension (str, &isa_flags);
8954
8955 if (parse_res == AARCH64_PARSE_OK)
8956 {
8957 aarch64_isa_flags = isa_flags;
8958 return true;
8959 }
8960
8961 switch (parse_res)
8962 {
8963 case AARCH64_PARSE_MISSING_ARG:
8964 error ("missing feature modifier in target %s %qs",
8965 pragma_or_attr, str);
8966 break;
8967
8968 case AARCH64_PARSE_INVALID_FEATURE:
8969 error ("invalid feature modifier in target %s %qs",
8970 pragma_or_attr, str);
8971 break;
8972
8973 default:
8974 gcc_unreachable ();
8975 }
8976
8977 return false;
8978}
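
/* Sketch of the "+nothing" handling above (illustrative): for
   target ("+nothing+fp") the ISA flags are first cleared to zero and
   aarch64_parse_extension then re-enables just the FP feature (plus anything
   it implies), rather than adding FP on top of the currently selected set.  */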
8979
8980/* The target attributes that we support. On top of these we also support just
8981 ISA extensions, like __attribute__ ((target ("+crc"))), but that case is
8982 handled explicitly in aarch64_process_one_target_attr. */
8983
8984static const struct aarch64_attribute_info aarch64_attributes[] =
8985{
8986 { "general-regs-only", aarch64_attr_mask, false, NULL,
8987 OPT_mgeneral_regs_only },
8988 { "fix-cortex-a53-835769", aarch64_attr_bool, true, NULL,
8989 OPT_mfix_cortex_a53_835769 },
48bb1a55
CL
8990 { "fix-cortex-a53-843419", aarch64_attr_bool, true, NULL,
8991 OPT_mfix_cortex_a53_843419 },
5a2c8331
KT
8992 { "cmodel", aarch64_attr_enum, false, NULL, OPT_mcmodel_ },
8993 { "strict-align", aarch64_attr_mask, false, NULL, OPT_mstrict_align },
8994 { "omit-leaf-frame-pointer", aarch64_attr_bool, true, NULL,
8995 OPT_momit_leaf_frame_pointer },
8996 { "tls-dialect", aarch64_attr_enum, false, NULL, OPT_mtls_dialect_ },
8997 { "arch", aarch64_attr_custom, false, aarch64_handle_attr_arch,
8998 OPT_march_ },
8999 { "cpu", aarch64_attr_custom, false, aarch64_handle_attr_cpu, OPT_mcpu_ },
9000 { "tune", aarch64_attr_custom, false, aarch64_handle_attr_tune,
9001 OPT_mtune_ },
9002 { NULL, aarch64_attr_custom, false, NULL, OPT____ }
9003};
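
/* Example of how this table is consulted (illustrative):
   target ("no-omit-leaf-frame-pointer") matches the "omit-leaf-frame-pointer"
   entry, whose allow_neg field is true, so the "no-" prefix is accepted;
   target ("no-strict-align") is rejected by aarch64_process_one_target_attr
   because the "strict-align" entry has allow_neg set to false.  */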
9004
9005/* Parse ARG_STR which contains the definition of one target attribute.
9006 Show appropriate errors if any or return true if the attribute is valid.
9007 PRAGMA_OR_ATTR holds the string to use in error messages about whether
9008 we're processing a target attribute or pragma. */
9009
9010static bool
9011aarch64_process_one_target_attr (char *arg_str, const char* pragma_or_attr)
9012{
9013 bool invert = false;
9014
9015 size_t len = strlen (arg_str);
9016
9017 if (len == 0)
9018 {
9019 error ("malformed target %s", pragma_or_attr);
9020 return false;
9021 }
9022
9023 char *str_to_check = (char *) alloca (len + 1);
9024 strcpy (str_to_check, arg_str);
9025
9026 /* Skip leading whitespace. */
9027 while (*str_to_check == ' ' || *str_to_check == '\t')
9028 str_to_check++;
9029
9030 /* We have something like __attribute__ ((target ("+fp+nosimd"))).
9031 It is easier to detect and handle it explicitly here rather than going
9032 through the machinery for the rest of the target attributes in this
9033 function. */
9034 if (*str_to_check == '+')
9035 return aarch64_handle_attr_isa_flags (str_to_check, pragma_or_attr);
9036
9037 if (len > 3 && strncmp (str_to_check, "no-", 3) == 0)
9038 {
9039 invert = true;
9040 str_to_check += 3;
9041 }
9042 char *arg = strchr (str_to_check, '=');
9043
9044 /* If we found opt=foo then terminate STR_TO_CHECK at the '='
9045 and point ARG to "foo". */
9046 if (arg)
9047 {
9048 *arg = '\0';
9049 arg++;
9050 }
9051 const struct aarch64_attribute_info *p_attr;
16d12992 9052 bool found = false;
5a2c8331
KT
9053 for (p_attr = aarch64_attributes; p_attr->name; p_attr++)
9054 {
9055 /* If the names don't match up, or the user has given an argument
9056 to an attribute that doesn't accept one, or didn't give an argument
9057 to an attribute that expects one, fail to match. */
9058 if (strcmp (str_to_check, p_attr->name) != 0)
9059 continue;
9060
16d12992 9061 found = true;
5a2c8331
KT
9062 bool attr_need_arg_p = p_attr->attr_type == aarch64_attr_custom
9063 || p_attr->attr_type == aarch64_attr_enum;
9064
9065 if (attr_need_arg_p ^ (arg != NULL))
9066 {
9067 error ("target %s %qs does not accept an argument",
9068 pragma_or_attr, str_to_check);
9069 return false;
9070 }
9071
9072 /* If the name matches but the attribute does not allow "no-" versions
9073 then we can't match. */
9074 if (invert && !p_attr->allow_neg)
9075 {
9076 error ("target %s %qs does not allow a negated form",
9077 pragma_or_attr, str_to_check);
9078 return false;
9079 }
9080
9081 switch (p_attr->attr_type)
9082 {
9083 /* Has a custom handler registered.
9084 For example, cpu=, arch=, tune=. */
9085 case aarch64_attr_custom:
9086 gcc_assert (p_attr->handler);
9087 if (!p_attr->handler (arg, pragma_or_attr))
9088 return false;
9089 break;
9090
9091 /* Either set or unset a boolean option. */
9092 case aarch64_attr_bool:
9093 {
9094 struct cl_decoded_option decoded;
9095
9096 generate_option (p_attr->opt_num, NULL, !invert,
9097 CL_TARGET, &decoded);
9098 aarch64_handle_option (&global_options, &global_options_set,
9099 &decoded, input_location);
9100 break;
9101 }
9102 /* Set or unset a bit in the target_flags. aarch64_handle_option
9103 should know what mask to apply given the option number. */
9104 case aarch64_attr_mask:
9105 {
9106 struct cl_decoded_option decoded;
9107 /* We only need to specify the option number.
9108 aarch64_handle_option will know which mask to apply. */
9109 decoded.opt_index = p_attr->opt_num;
9110 decoded.value = !invert;
9111 aarch64_handle_option (&global_options, &global_options_set,
9112 &decoded, input_location);
9113 break;
9114 }
9115 /* Use the option setting machinery to set an option to an enum. */
9116 case aarch64_attr_enum:
9117 {
9118 gcc_assert (arg);
9119 bool valid;
9120 int value;
9121 valid = opt_enum_arg_to_value (p_attr->opt_num, arg,
9122 &value, CL_TARGET);
9123 if (valid)
9124 {
9125 set_option (&global_options, NULL, p_attr->opt_num, value,
9126 NULL, DK_UNSPECIFIED, input_location,
9127 global_dc);
9128 }
9129 else
9130 {
9131 error ("target %s %s=%s is not valid",
9132 pragma_or_attr, str_to_check, arg);
9133 }
9134 break;
9135 }
9136 default:
9137 gcc_unreachable ();
9138 }
9139 }
9140
16d12992
KT
9141 /* If we reached here we either have found an attribute and validated
9142 it or didn't match any. If we matched an attribute but its arguments
9143 were malformed we will have returned false already. */
9144 return found;
5a2c8331
KT
9145}
9146
9147/* Count how many times the character C appears in
9148 NULL-terminated string STR. */
9149
9150static unsigned int
9151num_occurences_in_str (char c, char *str)
9152{
9153 unsigned int res = 0;
9154 while (*str != '\0')
9155 {
9156 if (*str == c)
9157 res++;
9158
9159 str++;
9160 }
9161
9162 return res;
9163}
9164
9165/* Parse the tree in ARGS that contains the target attribute information
9166 and update the global target options space. PRAGMA_OR_ATTR is a string
9167 to be used in error messages, specifying whether this is processing
9168 a target attribute or a target pragma. */
9169
9170bool
9171aarch64_process_target_attr (tree args, const char* pragma_or_attr)
9172{
9173 if (TREE_CODE (args) == TREE_LIST)
9174 {
9175 do
9176 {
9177 tree head = TREE_VALUE (args);
9178 if (head)
9179 {
9180 if (!aarch64_process_target_attr (head, pragma_or_attr))
9181 return false;
9182 }
9183 args = TREE_CHAIN (args);
9184 } while (args);
9185
9186 return true;
9187 }
9188 /* We expect to find a string to parse. */
9189 gcc_assert (TREE_CODE (args) == STRING_CST);
9190
9191 size_t len = strlen (TREE_STRING_POINTER (args));
9192 char *str_to_check = (char *) alloca (len + 1);
9193 strcpy (str_to_check, TREE_STRING_POINTER (args));
9194
9195 if (len == 0)
9196 {
9197 error ("malformed target %s value", pragma_or_attr);
9198 return false;
9199 }
9200
9201 /* Used to catch empty entries between commas, e.g.
9202 attribute ((target ("attr1,,attr2"))). */
9203 unsigned int num_commas = num_occurences_in_str (',', str_to_check);
9204
9205 /* Handle multiple target attributes separated by ','. */
9206 char *token = strtok (str_to_check, ",");
9207
9208 unsigned int num_attrs = 0;
9209 while (token)
9210 {
9211 num_attrs++;
9212 if (!aarch64_process_one_target_attr (token, pragma_or_attr))
9213 {
9214 error ("target %s %qs is invalid", pragma_or_attr, token);
9215 return false;
9216 }
9217
9218 token = strtok (NULL, ",");
9219 }
9220
9221 if (num_attrs != num_commas + 1)
9222 {
9223 error ("malformed target %s list %qs",
9224 pragma_or_attr, TREE_STRING_POINTER (args));
9225 return false;
9226 }
9227
9228 return true;
9229}
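
/* Worked example (illustrative): for
   __attribute__ ((target ("arch=armv8-a,strict-align"))) the STRING_CST
   contains one comma, strtok produces the two tokens "arch=armv8-a" and
   "strict-align", and num_attrs (2) equals num_commas + 1, so the list is
   accepted.  A string such as "arch=armv8-a,,strict-align" still yields two
   tokens but contains two commas, so the final check reports a malformed
   target list.  */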
9230
9231/* Implement TARGET_OPTION_VALID_ATTRIBUTE_P. This is used to
9232 process attribute ((target ("..."))). */
9233
9234static bool
9235aarch64_option_valid_attribute_p (tree fndecl, tree, tree args, int)
9236{
9237 struct cl_target_option cur_target;
9238 bool ret;
9239 tree old_optimize;
9240 tree new_target, new_optimize;
9241 tree existing_target = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
91d0e8de
KT
9242
9243 /* If what we're processing is the current pragma string then the
9244 target option node is already stored in target_option_current_node
9245 by aarch64_pragma_target_parse in aarch64-c.c. Use that to avoid
9246 having to re-parse the string. This is especially useful to keep
9247 arm_neon.h compile times down since that header contains a lot
9248 of intrinsics enclosed in pragmas. */
9249 if (!existing_target && args == current_target_pragma)
9250 {
9251 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = target_option_current_node;
9252 return true;
9253 }
5a2c8331
KT
9254 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
9255
9256 old_optimize = build_optimization_node (&global_options);
9257 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
9258
9259 /* If the function changed the optimization levels as well as setting
9260 target options, start with the optimizations specified. */
9261 if (func_optimize && func_optimize != old_optimize)
9262 cl_optimization_restore (&global_options,
9263 TREE_OPTIMIZATION (func_optimize));
9264
9265 /* Save the current target options to restore at the end. */
9266 cl_target_option_save (&cur_target, &global_options);
9267
9268 /* If fndecl already has some target attributes applied to it, unpack
9269 them so that we add this attribute on top of them, rather than
9270 overwriting them. */
9271 if (existing_target)
9272 {
9273 struct cl_target_option *existing_options
9274 = TREE_TARGET_OPTION (existing_target);
9275
9276 if (existing_options)
9277 cl_target_option_restore (&global_options, existing_options);
9278 }
9279 else
9280 cl_target_option_restore (&global_options,
9281 TREE_TARGET_OPTION (target_option_current_node));
9282
9283
9284 ret = aarch64_process_target_attr (args, "attribute");
9285
9286 /* Set up any additional state. */
9287 if (ret)
9288 {
9289 aarch64_override_options_internal (&global_options);
e95a988a
KT
9290 /* Initialize SIMD builtins if we haven't already.
9291 Set current_target_pragma to NULL for the duration so that
9292 the builtin initialization code doesn't try to tag the functions
9293 being built with the attributes specified by any current pragma, thus
9294 going into an infinite recursion. */
9295 if (TARGET_SIMD)
9296 {
9297 tree saved_current_target_pragma = current_target_pragma;
9298 current_target_pragma = NULL;
9299 aarch64_init_simd_builtins ();
9300 current_target_pragma = saved_current_target_pragma;
9301 }
5a2c8331
KT
9302 new_target = build_target_option_node (&global_options);
9303 }
9304 else
9305 new_target = NULL;
9306
9307 new_optimize = build_optimization_node (&global_options);
9308
9309 if (fndecl && ret)
9310 {
9311 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
9312
9313 if (old_optimize != new_optimize)
9314 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
9315 }
9316
9317 cl_target_option_restore (&global_options, &cur_target);
9318
9319 if (old_optimize != new_optimize)
9320 cl_optimization_restore (&global_options,
9321 TREE_OPTIMIZATION (old_optimize));
9322 return ret;
9323}
9324
1fd8d40c
KT
9325/* Helper for aarch64_can_inline_p. In the case where CALLER and CALLEE are
9326 tri-bool options (yes, no, don't care) and the default value is
9327 DEF, determine whether to reject inlining. */
9328
9329static bool
9330aarch64_tribools_ok_for_inlining_p (int caller, int callee,
9331 int dont_care, int def)
9332{
9333 /* If the callee doesn't care, always allow inlining. */
9334 if (callee == dont_care)
9335 return true;
9336
9337 /* If the caller doesn't care, always allow inlining. */
9338 if (caller == dont_care)
9339 return true;
9340
9341 /* Otherwise, allow inlining if either the callee and caller values
9342 agree, or if the callee is using the default value. */
9343 return (callee == caller || callee == def);
9344}
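
/* Example with the -momit-leaf-frame-pointer call site below (illustrative):
   the tri-bool is passed with DONT_CARE == 2 and DEF == 1, so inlining is
   rejected only when caller and callee both specify the option explicitly,
   they disagree, and the callee's value is not the default.  */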
9345
9346/* Implement TARGET_CAN_INLINE_P. Decide whether it is valid
9347 to inline CALLEE into CALLER based on target-specific info.
9348 Make sure that the caller and callee have compatible architectural
9349 features. Then go through the other possible target attributes
9350 and see if they can block inlining. Try not to reject always_inline
9351 callees unless they are incompatible architecturally. */
9352
9353static bool
9354aarch64_can_inline_p (tree caller, tree callee)
9355{
9356 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
9357 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
9358
9359 /* If callee has no option attributes, then it is ok to inline. */
9360 if (!callee_tree)
9361 return true;
9362
9363 struct cl_target_option *caller_opts
9364 = TREE_TARGET_OPTION (caller_tree ? caller_tree
9365 : target_option_default_node);
9366
9367 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
9368
9369
9370 /* Callee's ISA flags should be a subset of the caller's. */
9371 if ((caller_opts->x_aarch64_isa_flags & callee_opts->x_aarch64_isa_flags)
9372 != callee_opts->x_aarch64_isa_flags)
9373 return false;
9374
9375 /* Allow non-strict-align functions to be inlined into strict-align
9376 ones, but not the other way around. */
9377 if ((TARGET_STRICT_ALIGN_P (caller_opts->x_target_flags)
9378 != TARGET_STRICT_ALIGN_P (callee_opts->x_target_flags))
9379 && !(!TARGET_STRICT_ALIGN_P (callee_opts->x_target_flags)
9380 && TARGET_STRICT_ALIGN_P (caller_opts->x_target_flags)))
9381 return false;
9382
9383 bool always_inline = lookup_attribute ("always_inline",
9384 DECL_ATTRIBUTES (callee));
9385
9386 /* If the architectural features match up and the callee is always_inline
9387 then the other attributes don't matter. */
9388 if (always_inline)
9389 return true;
9390
9391 if (caller_opts->x_aarch64_cmodel_var
9392 != callee_opts->x_aarch64_cmodel_var)
9393 return false;
9394
9395 if (caller_opts->x_aarch64_tls_dialect
9396 != callee_opts->x_aarch64_tls_dialect)
9397 return false;
9398
9399 /* Honour explicit requests to workaround errata. */
9400 if (!aarch64_tribools_ok_for_inlining_p (
9401 caller_opts->x_aarch64_fix_a53_err835769,
9402 callee_opts->x_aarch64_fix_a53_err835769,
9403 2, TARGET_FIX_ERR_A53_835769_DEFAULT))
9404 return false;
9405
48bb1a55
CL
9406 if (!aarch64_tribools_ok_for_inlining_p (
9407 caller_opts->x_aarch64_fix_a53_err843419,
9408 callee_opts->x_aarch64_fix_a53_err843419,
9409 2, TARGET_FIX_ERR_A53_843419))
9410 return false;
9411
1fd8d40c
KT
9412 /* If the user explicitly specified -momit-leaf-frame-pointer for the
9413 caller and callee and they don't match up, reject inlining. */
9414 if (!aarch64_tribools_ok_for_inlining_p (
9415 caller_opts->x_flag_omit_leaf_frame_pointer,
9416 callee_opts->x_flag_omit_leaf_frame_pointer,
9417 2, 1))
9418 return false;
9419
9420 /* If the callee has specific tuning overrides, respect them. */
9421 if (callee_opts->x_aarch64_override_tune_string != NULL
9422 && caller_opts->x_aarch64_override_tune_string == NULL)
9423 return false;
9424
9425 /* If the user specified tuning override strings for the
9426 caller and callee and they don't match up, reject inlining.
9427 We just do a string compare here, we don't analyze the meaning
9428 of the string, as it would be too costly for little gain. */
9429 if (callee_opts->x_aarch64_override_tune_string
9430 && caller_opts->x_aarch64_override_tune_string
9431 && (strcmp (callee_opts->x_aarch64_override_tune_string,
9432 caller_opts->x_aarch64_override_tune_string) != 0))
9433 return false;
9434
9435 return true;
9436}
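
/* Illustrative consequence of the ISA-subset check above: a callee compiled
   with __attribute__ ((target ("+crc"))) cannot be inlined into a caller
   that lacks the CRC feature, even if the callee is marked always_inline,
   because the architectural check is performed before the always_inline
   shortcut.  */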
9437
43e9d192
IB
9438/* Return true if SYMBOL_REF X binds locally. */
9439
9440static bool
9441aarch64_symbol_binds_local_p (const_rtx x)
9442{
9443 return (SYMBOL_REF_DECL (x)
9444 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
9445 : SYMBOL_REF_LOCAL_P (x));
9446}
9447
9448/* Return true if SYMBOL_REF X is thread local */
9449static bool
9450aarch64_tls_symbol_p (rtx x)
9451{
9452 if (! TARGET_HAVE_TLS)
9453 return false;
9454
9455 if (GET_CODE (x) != SYMBOL_REF)
9456 return false;
9457
9458 return SYMBOL_REF_TLS_MODEL (x) != 0;
9459}
9460
9461/* Classify a TLS symbol into one of the TLS kinds. */
9462enum aarch64_symbol_type
9463aarch64_classify_tls_symbol (rtx x)
9464{
9465 enum tls_model tls_kind = tls_symbolic_operand_type (x);
9466
9467 switch (tls_kind)
9468 {
9469 case TLS_MODEL_GLOBAL_DYNAMIC:
9470 case TLS_MODEL_LOCAL_DYNAMIC:
9471 return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
9472
9473 case TLS_MODEL_INITIAL_EXEC:
5ae7caad
JW
9474 switch (aarch64_cmodel)
9475 {
9476 case AARCH64_CMODEL_TINY:
9477 case AARCH64_CMODEL_TINY_PIC:
9478 return SYMBOL_TINY_TLSIE;
9479 default:
79496620 9480 return SYMBOL_SMALL_TLSIE;
5ae7caad 9481 }
43e9d192
IB
9482
9483 case TLS_MODEL_LOCAL_EXEC:
cbf5629e
JW
9484 if (aarch64_tls_size == 12)
9485 return SYMBOL_TLSLE12;
9486 else if (aarch64_tls_size == 24)
9487 return SYMBOL_TLSLE24;
9488 else if (aarch64_tls_size == 32)
9489 return SYMBOL_TLSLE32;
9490 else if (aarch64_tls_size == 48)
9491 return SYMBOL_TLSLE48;
9492 else
9493 gcc_unreachable ();
43e9d192
IB
9494
9495 case TLS_MODEL_EMULATED:
9496 case TLS_MODEL_NONE:
9497 return SYMBOL_FORCE_TO_MEM;
9498
9499 default:
9500 gcc_unreachable ();
9501 }
9502}
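
/* Example (illustrative): for a local-exec TLS symbol compiled with
   -mtls-size=24 the switch above returns SYMBOL_TLSLE24, while
   -mtls-size=12 selects SYMBOL_TLSLE12; for local-exec symbols the result
   depends only on aarch64_tls_size.  */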
9503
9504/* Return the method that should be used to access SYMBOL_REF or
a6e0bfa7 9505 LABEL_REF X. */
17f4d4bf 9506
43e9d192 9507enum aarch64_symbol_type
a6e0bfa7 9508aarch64_classify_symbol (rtx x, rtx offset)
43e9d192
IB
9509{
9510 if (GET_CODE (x) == LABEL_REF)
9511 {
9512 switch (aarch64_cmodel)
9513 {
9514 case AARCH64_CMODEL_LARGE:
9515 return SYMBOL_FORCE_TO_MEM;
9516
9517 case AARCH64_CMODEL_TINY_PIC:
9518 case AARCH64_CMODEL_TINY:
a5350ddc
CSS
9519 return SYMBOL_TINY_ABSOLUTE;
9520
1b1e81f8 9521 case AARCH64_CMODEL_SMALL_SPIC:
43e9d192
IB
9522 case AARCH64_CMODEL_SMALL_PIC:
9523 case AARCH64_CMODEL_SMALL:
9524 return SYMBOL_SMALL_ABSOLUTE;
9525
9526 default:
9527 gcc_unreachable ();
9528 }
9529 }
9530
17f4d4bf 9531 if (GET_CODE (x) == SYMBOL_REF)
43e9d192 9532 {
43e9d192
IB
9533 if (aarch64_tls_symbol_p (x))
9534 return aarch64_classify_tls_symbol (x);
9535
17f4d4bf
CSS
9536 switch (aarch64_cmodel)
9537 {
9538 case AARCH64_CMODEL_TINY:
15f6e0da 9539 /* When we retrieve symbol + offset address, we have to make sure
f8b756b7
TB
9540 the offset does not cause overflow of the final address. But
9541 we have no way of knowing the address of symbol at compile time
9542 so we can't accurately say if the distance between the PC and
9543 symbol + offset is outside the addressable range of +/-1M in the
9544 TINY code model. So we rely on images not being greater than
9545 1M and cap the offset at 1M and anything beyond 1M will have to
15f6e0da
RR
9546 be loaded using an alternative mechanism. Furthermore if the
9547 symbol is a weak reference to something that isn't known to
9548 resolve to a symbol in this module, then force to memory. */
9549 if ((SYMBOL_REF_WEAK (x)
9550 && !aarch64_symbol_binds_local_p (x))
f8b756b7 9551 || INTVAL (offset) < -1048575 || INTVAL (offset) > 1048575)
a5350ddc
CSS
9552 return SYMBOL_FORCE_TO_MEM;
9553 return SYMBOL_TINY_ABSOLUTE;
9554
17f4d4bf 9555 case AARCH64_CMODEL_SMALL:
f8b756b7
TB
9556 /* Same reasoning as the tiny code model, but the offset cap here is
9557 4G. */
15f6e0da
RR
9558 if ((SYMBOL_REF_WEAK (x)
9559 && !aarch64_symbol_binds_local_p (x))
3ff5d1f0
TB
9560 || !IN_RANGE (INTVAL (offset), HOST_WIDE_INT_C (-4294967263),
9561 HOST_WIDE_INT_C (4294967264)))
17f4d4bf
CSS
9562 return SYMBOL_FORCE_TO_MEM;
9563 return SYMBOL_SMALL_ABSOLUTE;
43e9d192 9564
17f4d4bf 9565 case AARCH64_CMODEL_TINY_PIC:
38e6c9a6 9566 if (!aarch64_symbol_binds_local_p (x))
87dd8ab0 9567 return SYMBOL_TINY_GOT;
38e6c9a6
MS
9568 return SYMBOL_TINY_ABSOLUTE;
9569
1b1e81f8 9570 case AARCH64_CMODEL_SMALL_SPIC:
17f4d4bf
CSS
9571 case AARCH64_CMODEL_SMALL_PIC:
9572 if (!aarch64_symbol_binds_local_p (x))
1b1e81f8
JW
9573 return (aarch64_cmodel == AARCH64_CMODEL_SMALL_SPIC
9574 ? SYMBOL_SMALL_GOT_28K : SYMBOL_SMALL_GOT_4G);
17f4d4bf 9575 return SYMBOL_SMALL_ABSOLUTE;
43e9d192 9576
9ee6540a
WD
9577 case AARCH64_CMODEL_LARGE:
9578 /* This is alright even in PIC code as the constant
9579 pool reference is always PC relative and within
9580 the same translation unit. */
9581 if (CONSTANT_POOL_ADDRESS_P (x))
9582 return SYMBOL_SMALL_ABSOLUTE;
9583 else
9584 return SYMBOL_FORCE_TO_MEM;
9585
17f4d4bf
CSS
9586 default:
9587 gcc_unreachable ();
9588 }
43e9d192 9589 }
17f4d4bf 9590
43e9d192
IB
9591 /* By default push everything into the constant pool. */
9592 return SYMBOL_FORCE_TO_MEM;
9593}
9594
43e9d192
IB
9595bool
9596aarch64_constant_address_p (rtx x)
9597{
9598 return (CONSTANT_P (x) && memory_address_p (DImode, x));
9599}
9600
9601bool
9602aarch64_legitimate_pic_operand_p (rtx x)
9603{
9604 if (GET_CODE (x) == SYMBOL_REF
9605 || (GET_CODE (x) == CONST
9606 && GET_CODE (XEXP (x, 0)) == PLUS
9607 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
9608 return false;
9609
9610 return true;
9611}
9612
3520f7cc
JG
9613/* Return true if X holds either a quarter-precision or
9614 floating-point +0.0 constant. */
9615static bool
ef4bddc2 9616aarch64_valid_floating_const (machine_mode mode, rtx x)
3520f7cc
JG
9617{
9618 if (!CONST_DOUBLE_P (x))
9619 return false;
9620
6a0f8c01
JW
9621 if (aarch64_float_const_zero_rtx_p (x))
9622 return true;
9623
9624 /* We only handle moving 0.0 to a TFmode register. */
3520f7cc
JG
9625 if (!(mode == SFmode || mode == DFmode))
9626 return false;
9627
3520f7cc
JG
9628 return aarch64_float_const_representable_p (x);
9629}
9630
43e9d192 9631static bool
ef4bddc2 9632aarch64_legitimate_constant_p (machine_mode mode, rtx x)
43e9d192
IB
9633{
9634 /* Do not allow vector struct mode constants. We could support
9635 0 and -1 easily, but they need support in aarch64-simd.md. */
9636 if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
9637 return false;
9638
9639 /* This could probably go away because
9640 we now decompose CONST_INTs according to expand_mov_immediate. */
9641 if ((GET_CODE (x) == CONST_VECTOR
48063b9d 9642 && aarch64_simd_valid_immediate (x, mode, false, NULL))
3520f7cc
JG
9643 || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
9644 return !targetm.cannot_force_const_mem (mode, x);
43e9d192
IB
9645
9646 if (GET_CODE (x) == HIGH
9647 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9648 return true;
9649
9650 return aarch64_constant_address_p (x);
9651}
9652
a5bc806c 9653rtx
43e9d192
IB
9654aarch64_load_tp (rtx target)
9655{
9656 if (!target
9657 || GET_MODE (target) != Pmode
9658 || !register_operand (target, Pmode))
9659 target = gen_reg_rtx (Pmode);
9660
9661 /* Can return in any reg. */
9662 emit_insn (gen_aarch64_load_tp_hard (target));
9663 return target;
9664}
9665
43e9d192
IB
9666/* On AAPCS systems, this is the "struct __va_list". */
9667static GTY(()) tree va_list_type;
9668
9669/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
9670 Return the type to use as __builtin_va_list.
9671
9672 AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
9673
9674 struct __va_list
9675 {
9676 void *__stack;
9677 void *__gr_top;
9678 void *__vr_top;
9679 int __gr_offs;
9680 int __vr_offs;
9681 }; */
9682
9683static tree
9684aarch64_build_builtin_va_list (void)
9685{
9686 tree va_list_name;
9687 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
9688
9689 /* Create the type. */
9690 va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
9691 /* Give it the required name. */
9692 va_list_name = build_decl (BUILTINS_LOCATION,
9693 TYPE_DECL,
9694 get_identifier ("__va_list"),
9695 va_list_type);
9696 DECL_ARTIFICIAL (va_list_name) = 1;
9697 TYPE_NAME (va_list_type) = va_list_name;
665c56c6 9698 TYPE_STUB_DECL (va_list_type) = va_list_name;
43e9d192
IB
9699
9700 /* Create the fields. */
9701 f_stack = build_decl (BUILTINS_LOCATION,
9702 FIELD_DECL, get_identifier ("__stack"),
9703 ptr_type_node);
9704 f_grtop = build_decl (BUILTINS_LOCATION,
9705 FIELD_DECL, get_identifier ("__gr_top"),
9706 ptr_type_node);
9707 f_vrtop = build_decl (BUILTINS_LOCATION,
9708 FIELD_DECL, get_identifier ("__vr_top"),
9709 ptr_type_node);
9710 f_groff = build_decl (BUILTINS_LOCATION,
9711 FIELD_DECL, get_identifier ("__gr_offs"),
9712 integer_type_node);
9713 f_vroff = build_decl (BUILTINS_LOCATION,
9714 FIELD_DECL, get_identifier ("__vr_offs"),
9715 integer_type_node);
9716
88e3bdd1 9717 /* Tell tree-stdarg pass about our internal offset fields.
3fd6b9cc
JW
9718 NOTE: va_list_gpr/fpr_counter_field are only used for tree comparison
9719 purposes, to identify whether the code is updating va_list internal
9720 offset fields in an irregular way. */
9721 va_list_gpr_counter_field = f_groff;
9722 va_list_fpr_counter_field = f_vroff;
9723
43e9d192
IB
9724 DECL_ARTIFICIAL (f_stack) = 1;
9725 DECL_ARTIFICIAL (f_grtop) = 1;
9726 DECL_ARTIFICIAL (f_vrtop) = 1;
9727 DECL_ARTIFICIAL (f_groff) = 1;
9728 DECL_ARTIFICIAL (f_vroff) = 1;
9729
9730 DECL_FIELD_CONTEXT (f_stack) = va_list_type;
9731 DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
9732 DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
9733 DECL_FIELD_CONTEXT (f_groff) = va_list_type;
9734 DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
9735
9736 TYPE_FIELDS (va_list_type) = f_stack;
9737 DECL_CHAIN (f_stack) = f_grtop;
9738 DECL_CHAIN (f_grtop) = f_vrtop;
9739 DECL_CHAIN (f_vrtop) = f_groff;
9740 DECL_CHAIN (f_groff) = f_vroff;
9741
9742 /* Compute its layout. */
9743 layout_type (va_list_type);
9744
9745 return va_list_type;
9746}
9747
9748/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
9749static void
9750aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9751{
9752 const CUMULATIVE_ARGS *cum;
9753 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
9754 tree stack, grtop, vrtop, groff, vroff;
9755 tree t;
88e3bdd1
JW
9756 int gr_save_area_size = cfun->va_list_gpr_size;
9757 int vr_save_area_size = cfun->va_list_fpr_size;
43e9d192
IB
9758 int vr_offset;
9759
9760 cum = &crtl->args.info;
88e3bdd1
JW
9761 if (cfun->va_list_gpr_size)
9762 gr_save_area_size = MIN ((NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD,
9763 cfun->va_list_gpr_size);
9764 if (cfun->va_list_fpr_size)
9765 vr_save_area_size = MIN ((NUM_FP_ARG_REGS - cum->aapcs_nvrn)
9766 * UNITS_PER_VREG, cfun->va_list_fpr_size);
43e9d192 9767
d5726973 9768 if (!TARGET_FLOAT)
43e9d192 9769 {
261fb553 9770 gcc_assert (cum->aapcs_nvrn == 0);
43e9d192
IB
9771 vr_save_area_size = 0;
9772 }
9773
9774 f_stack = TYPE_FIELDS (va_list_type_node);
9775 f_grtop = DECL_CHAIN (f_stack);
9776 f_vrtop = DECL_CHAIN (f_grtop);
9777 f_groff = DECL_CHAIN (f_vrtop);
9778 f_vroff = DECL_CHAIN (f_groff);
9779
9780 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
9781 NULL_TREE);
9782 grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
9783 NULL_TREE);
9784 vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
9785 NULL_TREE);
9786 groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
9787 NULL_TREE);
9788 vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
9789 NULL_TREE);
9790
9791 /* Emit code to initialize STACK, which points to the next varargs stack
9792 argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
9793 by named arguments. STACK is 8-byte aligned. */
9794 t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
9795 if (cum->aapcs_stack_size > 0)
9796 t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
9797 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
9798 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9799
9800 /* Emit code to initialize GRTOP, the top of the GR save area.
9801 virtual_incoming_args_rtx should have been 16 byte aligned. */
9802 t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
9803 t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
9804 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9805
9806 /* Emit code to initialize VRTOP, the top of the VR save area.
9807 This address is gr_save_area_bytes below GRTOP, rounded
9808 down to the next 16-byte boundary. */
9809 t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
4f59f9f2
UB
9810 vr_offset = ROUND_UP (gr_save_area_size,
9811 STACK_BOUNDARY / BITS_PER_UNIT);
43e9d192
IB
9812
9813 if (vr_offset)
9814 t = fold_build_pointer_plus_hwi (t, -vr_offset);
9815 t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
9816 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9817
9818 /* Emit code to initialize GROFF, the offset from GRTOP of the
9819 next GPR argument. */
9820 t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
9821 build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
9822 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9823
9824 /* Likewise emit code to initialize VROFF, the offset from FTOP
9825 of the next VR argument. */
9826 t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
9827 build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
9828 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9829}
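
/* Worked example (illustrative, assuming the usual 8 general-purpose and 8
   vector argument registers and no tree-stdarg restrictions): for a varargs
   function with two named integer arguments, cum->aapcs_ncrn is 2, so
   gr_save_area_size is 6 * 8 = 48 bytes and __gr_offs is initialised to -48;
   with no named FP/SIMD arguments __vr_offs starts at -(8 * 16) = -128.  */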
9830
9831/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
9832
9833static tree
9834aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
9835 gimple_seq *post_p ATTRIBUTE_UNUSED)
9836{
9837 tree addr;
9838 bool indirect_p;
9839 bool is_ha; /* is HFA or HVA. */
9840 bool dw_align; /* double-word align. */
ef4bddc2 9841 machine_mode ag_mode = VOIDmode;
43e9d192 9842 int nregs;
ef4bddc2 9843 machine_mode mode;
43e9d192
IB
9844
9845 tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
9846 tree stack, f_top, f_off, off, arg, roundup, on_stack;
9847 HOST_WIDE_INT size, rsize, adjust, align;
9848 tree t, u, cond1, cond2;
9849
9850 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
9851 if (indirect_p)
9852 type = build_pointer_type (type);
9853
9854 mode = TYPE_MODE (type);
9855
9856 f_stack = TYPE_FIELDS (va_list_type_node);
9857 f_grtop = DECL_CHAIN (f_stack);
9858 f_vrtop = DECL_CHAIN (f_grtop);
9859 f_groff = DECL_CHAIN (f_vrtop);
9860 f_vroff = DECL_CHAIN (f_groff);
9861
9862 stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
9863 f_stack, NULL_TREE);
9864 size = int_size_in_bytes (type);
9865 align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
9866
9867 dw_align = false;
9868 adjust = 0;
9869 if (aarch64_vfp_is_call_or_return_candidate (mode,
9870 type,
9871 &ag_mode,
9872 &nregs,
9873 &is_ha))
9874 {
9875 /* TYPE passed in fp/simd registers. */
d5726973 9876 if (!TARGET_FLOAT)
261fb553 9877 aarch64_err_no_fpadvsimd (mode, "varargs");
43e9d192
IB
9878
9879 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
9880 unshare_expr (valist), f_vrtop, NULL_TREE);
9881 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
9882 unshare_expr (valist), f_vroff, NULL_TREE);
9883
9884 rsize = nregs * UNITS_PER_VREG;
9885
9886 if (is_ha)
9887 {
9888 if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
9889 adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
9890 }
9891 else if (BLOCK_REG_PADDING (mode, type, 1) == downward
9892 && size < UNITS_PER_VREG)
9893 {
9894 adjust = UNITS_PER_VREG - size;
9895 }
9896 }
9897 else
9898 {
9899 /* TYPE passed in general registers. */
9900 f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
9901 unshare_expr (valist), f_grtop, NULL_TREE);
9902 f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
9903 unshare_expr (valist), f_groff, NULL_TREE);
4f59f9f2 9904 rsize = ROUND_UP (size, UNITS_PER_WORD);
43e9d192
IB
9905 nregs = rsize / UNITS_PER_WORD;
9906
9907 if (align > 8)
9908 dw_align = true;
9909
9910 if (BLOCK_REG_PADDING (mode, type, 1) == downward
9911 && size < UNITS_PER_WORD)
9912 {
9913 adjust = UNITS_PER_WORD - size;
9914 }
9915 }
9916
9917 /* Get a local temporary for the field value. */
9918 off = get_initialized_tmp_var (f_off, pre_p, NULL);
9919
9920 /* Emit code to branch if off >= 0. */
9921 t = build2 (GE_EXPR, boolean_type_node, off,
9922 build_int_cst (TREE_TYPE (off), 0));
9923 cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
9924
9925 if (dw_align)
9926 {
9927 /* Emit: offs = (offs + 15) & -16. */
9928 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
9929 build_int_cst (TREE_TYPE (off), 15));
9930 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
9931 build_int_cst (TREE_TYPE (off), -16));
9932 roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
9933 }
9934 else
9935 roundup = NULL;
9936
9937 /* Update ap.__[g|v]r_offs */
9938 t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
9939 build_int_cst (TREE_TYPE (off), rsize));
9940 t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
9941
9942 /* String up. */
9943 if (roundup)
9944 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
9945
9946 /* [cond2] if (ap.__[g|v]r_offs > 0) */
9947 u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
9948 build_int_cst (TREE_TYPE (f_off), 0));
9949 cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
9950
9951 /* String up: make sure the assignment happens before the use. */
9952 t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
9953 COND_EXPR_ELSE (cond1) = t;
9954
9955 /* Prepare the trees handling the argument that is passed on the stack;
9956 the top level node will store in ON_STACK. */
9957 arg = get_initialized_tmp_var (stack, pre_p, NULL);
9958 if (align > 8)
9959 {
9960 /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
9961 t = fold_convert (intDI_type_node, arg);
9962 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
9963 build_int_cst (TREE_TYPE (t), 15));
9964 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9965 build_int_cst (TREE_TYPE (t), -16));
9966 t = fold_convert (TREE_TYPE (arg), t);
9967 roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
9968 }
9969 else
9970 roundup = NULL;
9971 /* Advance ap.__stack */
9972 t = fold_convert (intDI_type_node, arg);
9973 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
9974 build_int_cst (TREE_TYPE (t), size + 7));
9975 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9976 build_int_cst (TREE_TYPE (t), -8));
9977 t = fold_convert (TREE_TYPE (arg), t);
9978 t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
9979 /* String up roundup and advance. */
9980 if (roundup)
9981 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
9982 /* String up with arg */
9983 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
9984 /* Big-endianness related address adjustment. */
9985 if (BLOCK_REG_PADDING (mode, type, 1) == downward
9986 && size < UNITS_PER_WORD)
9987 {
9988 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
9989 size_int (UNITS_PER_WORD - size));
9990 on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
9991 }
9992
9993 COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
9994 COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
9995
9996 /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
9997 t = off;
9998 if (adjust)
9999 t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
10000 build_int_cst (TREE_TYPE (off), adjust));
10001
10002 t = fold_convert (sizetype, t);
10003 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
10004
10005 if (is_ha)
10006 {
10007 /* type ha; // treat as "struct {ftype field[n];}"
10008 ... [computing offs]
10009 for (i = 0; i <nregs; ++i, offs += 16)
10010 ha.field[i] = *((ftype *)(ap.__vr_top + offs));
10011 return ha; */
10012 int i;
10013 tree tmp_ha, field_t, field_ptr_t;
10014
10015 /* Declare a local variable. */
10016 tmp_ha = create_tmp_var_raw (type, "ha");
10017 gimple_add_tmp_var (tmp_ha);
10018
10019 /* Establish the base type. */
10020 switch (ag_mode)
10021 {
10022 case SFmode:
10023 field_t = float_type_node;
10024 field_ptr_t = float_ptr_type_node;
10025 break;
10026 case DFmode:
10027 field_t = double_type_node;
10028 field_ptr_t = double_ptr_type_node;
10029 break;
10030 case TFmode:
10031 field_t = long_double_type_node;
10032 field_ptr_t = long_double_ptr_type_node;
10033 break;
43e9d192 10034 case HFmode:
1b62ed4f
JG
10035 field_t = aarch64_fp16_type_node;
10036 field_ptr_t = aarch64_fp16_ptr_type_node;
43e9d192 10037 break;
43e9d192
IB
10038 case V2SImode:
10039 case V4SImode:
10040 {
10041 tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
10042 field_t = build_vector_type_for_mode (innertype, ag_mode);
10043 field_ptr_t = build_pointer_type (field_t);
10044 }
10045 break;
10046 default:
10047 gcc_assert (0);
10048 }
10049
10050 /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area */
10051 tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
10052 addr = t;
10053 t = fold_convert (field_ptr_t, addr);
10054 t = build2 (MODIFY_EXPR, field_t,
10055 build1 (INDIRECT_REF, field_t, tmp_ha),
10056 build1 (INDIRECT_REF, field_t, t));
10057
10058 /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
10059 for (i = 1; i < nregs; ++i)
10060 {
10061 addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
10062 u = fold_convert (field_ptr_t, addr);
10063 u = build2 (MODIFY_EXPR, field_t,
10064 build2 (MEM_REF, field_t, tmp_ha,
10065 build_int_cst (field_ptr_t,
10066 (i *
10067 int_size_in_bytes (field_t)))),
10068 build1 (INDIRECT_REF, field_t, u));
10069 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
10070 }
10071
10072 u = fold_convert (TREE_TYPE (f_top), tmp_ha);
10073 t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
10074 }
10075
10076 COND_EXPR_ELSE (cond2) = t;
10077 addr = fold_convert (build_pointer_type (type), cond1);
10078 addr = build_va_arg_indirect_ref (addr);
10079
10080 if (indirect_p)
10081 addr = build_va_arg_indirect_ref (addr);
10082
10083 return addr;
10084}
10085
10086/* Implement TARGET_SETUP_INCOMING_VARARGS. */
10087
10088static void
ef4bddc2 10089aarch64_setup_incoming_varargs (cumulative_args_t cum_v, machine_mode mode,
43e9d192
IB
10090 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10091 int no_rtl)
10092{
10093 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10094 CUMULATIVE_ARGS local_cum;
88e3bdd1
JW
10095 int gr_saved = cfun->va_list_gpr_size;
10096 int vr_saved = cfun->va_list_fpr_size;
43e9d192
IB
10097
10098 /* The caller has advanced CUM up to, but not beyond, the last named
10099 argument. Advance a local copy of CUM past the last "real" named
10100 argument, to find out how many registers are left over. */
10101 local_cum = *cum;
10102 aarch64_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, true);
10103
88e3bdd1
JW
10104 /* Find out how many registers we need to save.
10105 Honor tree-stdarg analysis results. */
10106 if (cfun->va_list_gpr_size)
10107 gr_saved = MIN (NUM_ARG_REGS - local_cum.aapcs_ncrn,
10108 cfun->va_list_gpr_size / UNITS_PER_WORD);
10109 if (cfun->va_list_fpr_size)
10110 vr_saved = MIN (NUM_FP_ARG_REGS - local_cum.aapcs_nvrn,
10111 cfun->va_list_fpr_size / UNITS_PER_VREG);
43e9d192 10112
d5726973 10113 if (!TARGET_FLOAT)
43e9d192 10114 {
261fb553 10115 gcc_assert (local_cum.aapcs_nvrn == 0);
43e9d192
IB
10116 vr_saved = 0;
10117 }
10118
10119 if (!no_rtl)
10120 {
10121 if (gr_saved > 0)
10122 {
10123 rtx ptr, mem;
10124
10125 /* virtual_incoming_args_rtx should have been 16-byte aligned. */
10126 ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
10127 - gr_saved * UNITS_PER_WORD);
10128 mem = gen_frame_mem (BLKmode, ptr);
10129 set_mem_alias_set (mem, get_varargs_alias_set ());
10130
10131 move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
10132 mem, gr_saved);
10133 }
10134 if (vr_saved > 0)
10135 {
10136 /* We can't use move_block_from_reg, because it will use
10137 the wrong mode, storing D regs only. */
ef4bddc2 10138 machine_mode mode = TImode;
88e3bdd1 10139 int off, i, vr_start;
43e9d192
IB
10140
10141 /* Set OFF to the offset from virtual_incoming_args_rtx of
10142 the first vector register. The VR save area lies below
10143 the GR one, and is aligned to 16 bytes. */
4f59f9f2
UB
10144 off = -ROUND_UP (gr_saved * UNITS_PER_WORD,
10145 STACK_BOUNDARY / BITS_PER_UNIT);
43e9d192
IB
10146 off -= vr_saved * UNITS_PER_VREG;
10147
88e3bdd1
JW
10148 vr_start = V0_REGNUM + local_cum.aapcs_nvrn;
10149 for (i = 0; i < vr_saved; ++i)
43e9d192
IB
10150 {
10151 rtx ptr, mem;
10152
10153 ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
10154 mem = gen_frame_mem (mode, ptr);
10155 set_mem_alias_set (mem, get_varargs_alias_set ());
88e3bdd1 10156 aarch64_emit_move (mem, gen_rtx_REG (mode, vr_start + i));
43e9d192
IB
10157 off += UNITS_PER_VREG;
10158 }
10159 }
10160 }
10161
10162 /* We don't save the size into *PRETEND_SIZE because we want to avoid
10163 any complication of having crtl->args.pretend_args_size changed. */
8799637a 10164 cfun->machine->frame.saved_varargs_size
4f59f9f2
UB
10165 = (ROUND_UP (gr_saved * UNITS_PER_WORD,
10166 STACK_BOUNDARY / BITS_PER_UNIT)
43e9d192
IB
10167 + vr_saved * UNITS_PER_VREG);
10168}
10169
10170static void
10171aarch64_conditional_register_usage (void)
10172{
10173 int i;
10174 if (!TARGET_FLOAT)
10175 {
10176 for (i = V0_REGNUM; i <= V31_REGNUM; i++)
10177 {
10178 fixed_regs[i] = 1;
10179 call_used_regs[i] = 1;
10180 }
10181 }
10182}
10183
10184/* Walk down the type tree of TYPE counting consecutive base elements.
10185 If *MODEP is VOIDmode, then set it to the first valid floating point
10186 type. If a non-floating point type is found, or if a floating point
10187 type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
10188 otherwise return the count in the sub-tree. */
10189static int
ef4bddc2 10190aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
43e9d192 10191{
ef4bddc2 10192 machine_mode mode;
43e9d192
IB
10193 HOST_WIDE_INT size;
10194
10195 switch (TREE_CODE (type))
10196 {
10197 case REAL_TYPE:
10198 mode = TYPE_MODE (type);
1b62ed4f
JG
10199 if (mode != DFmode && mode != SFmode
10200 && mode != TFmode && mode != HFmode)
43e9d192
IB
10201 return -1;
10202
10203 if (*modep == VOIDmode)
10204 *modep = mode;
10205
10206 if (*modep == mode)
10207 return 1;
10208
10209 break;
10210
10211 case COMPLEX_TYPE:
10212 mode = TYPE_MODE (TREE_TYPE (type));
1b62ed4f
JG
10213 if (mode != DFmode && mode != SFmode
10214 && mode != TFmode && mode != HFmode)
43e9d192
IB
10215 return -1;
10216
10217 if (*modep == VOIDmode)
10218 *modep = mode;
10219
10220 if (*modep == mode)
10221 return 2;
10222
10223 break;
10224
10225 case VECTOR_TYPE:
10226 /* Use V2SImode and V4SImode as representatives of all 64-bit
10227 and 128-bit vector types. */
10228 size = int_size_in_bytes (type);
10229 switch (size)
10230 {
10231 case 8:
10232 mode = V2SImode;
10233 break;
10234 case 16:
10235 mode = V4SImode;
10236 break;
10237 default:
10238 return -1;
10239 }
10240
10241 if (*modep == VOIDmode)
10242 *modep = mode;
10243
10244 /* Vector modes are considered to be opaque: two vectors are
10245 equivalent for the purposes of being homogeneous aggregates
10246 if they are the same size. */
10247 if (*modep == mode)
10248 return 1;
10249
10250 break;
10251
10252 case ARRAY_TYPE:
10253 {
10254 int count;
10255 tree index = TYPE_DOMAIN (type);
10256
807e902e
KZ
10257 /* Can't handle incomplete types nor sizes that are not
10258 fixed. */
10259 if (!COMPLETE_TYPE_P (type)
10260 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
43e9d192
IB
10261 return -1;
10262
10263 count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
10264 if (count == -1
10265 || !index
10266 || !TYPE_MAX_VALUE (index)
cc269bb6 10267 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
43e9d192 10268 || !TYPE_MIN_VALUE (index)
cc269bb6 10269 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
43e9d192
IB
10270 || count < 0)
10271 return -1;
10272
ae7e9ddd
RS
10273 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10274 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
43e9d192
IB
10275
10276 /* There must be no padding. */
807e902e 10277 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
43e9d192
IB
10278 return -1;
10279
10280 return count;
10281 }
10282
10283 case RECORD_TYPE:
10284 {
10285 int count = 0;
10286 int sub_count;
10287 tree field;
10288
807e902e
KZ
10289 /* Can't handle incomplete types nor sizes that are not
10290 fixed. */
10291 if (!COMPLETE_TYPE_P (type)
10292 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
43e9d192
IB
10293 return -1;
10294
10295 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10296 {
10297 if (TREE_CODE (field) != FIELD_DECL)
10298 continue;
10299
10300 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
10301 if (sub_count < 0)
10302 return -1;
10303 count += sub_count;
10304 }
10305
10306 /* There must be no padding. */
807e902e 10307 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
43e9d192
IB
10308 return -1;
10309
10310 return count;
10311 }
10312
10313 case UNION_TYPE:
10314 case QUAL_UNION_TYPE:
10315 {
10316 /* These aren't very interesting except in a degenerate case. */
10317 int count = 0;
10318 int sub_count;
10319 tree field;
10320
807e902e
KZ
10321 /* Can't handle incomplete types nor sizes that are not
10322 fixed. */
10323 if (!COMPLETE_TYPE_P (type)
10324 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
43e9d192
IB
10325 return -1;
10326
10327 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10328 {
10329 if (TREE_CODE (field) != FIELD_DECL)
10330 continue;
10331
10332 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
10333 if (sub_count < 0)
10334 return -1;
10335 count = count > sub_count ? count : sub_count;
10336 }
10337
10338 /* There must be no padding. */
807e902e 10339 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
43e9d192
IB
10340 return -1;
10341
10342 return count;
10343 }
10344
10345 default:
10346 break;
10347 }
10348
10349 return -1;
10350}
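
/* Illustrative examples of the recursion above: for
   struct { double x; double y; } both REAL_TYPE fields report DFmode, so the
   function returns 2 with *MODEP == DFmode; for struct { float f; double d; }
   the second field's mode disagrees with the recorded SFmode, the RECORD_TYPE
   case receives -1, and the type is not treated as a homogeneous aggregate.  */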
10351
b6ec6215
KT
10352/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
10353 type as described in AAPCS64 \S 4.1.2.
10354
10355 See the comment above aarch64_composite_type_p for the notes on MODE. */
10356
10357static bool
10358aarch64_short_vector_p (const_tree type,
10359 machine_mode mode)
10360{
10361 HOST_WIDE_INT size = -1;
10362
10363 if (type && TREE_CODE (type) == VECTOR_TYPE)
10364 size = int_size_in_bytes (type);
10365 else if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
10366 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
10367 size = GET_MODE_SIZE (mode);
10368
10369 return (size == 8 || size == 16);
10370}
10371
43e9d192
IB
10372/* Return TRUE if the type, as described by TYPE and MODE, is a composite
10373 type as described in AAPCS64 \S 4.3. This includes aggregate, union and
10374 array types. The C99 floating-point complex types are also considered
10375 as composite types, according to AAPCS64 \S 7.1.1. The complex integer
10376 types, which are GCC extensions and out of the scope of AAPCS64, are
10377 treated as composite types here as well.
10378
10379 Note that MODE itself is not sufficient in determining whether a type
10380 is such a composite type or not. This is because
10381 stor-layout.c:compute_record_mode may have already changed the MODE
10382 (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
10383 structure with only one field may have its MODE set to the mode of the
10384 field. Also an integer mode whose size matches the size of the
10385 RECORD_TYPE type may be used to substitute the original mode
10386 (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
10387 solely relied on. */
10388
10389static bool
10390aarch64_composite_type_p (const_tree type,
ef4bddc2 10391 machine_mode mode)
43e9d192 10392{
b6ec6215
KT
10393 if (aarch64_short_vector_p (type, mode))
10394 return false;
10395
43e9d192
IB
10396 if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
10397 return true;
10398
10399 if (mode == BLKmode
10400 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
10401 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
10402 return true;
10403
10404 return false;
10405}
10406
43e9d192
IB
10407/* Return TRUE if an argument, whose type is described by TYPE and MODE,
10408 shall be passed or returned in simd/fp register(s) (providing these
10409 parameter passing registers are available).
10410
10411 Upon successful return, *COUNT returns the number of needed registers,
10412 *BASE_MODE returns the mode of the individual register and when IS_HAF
10413 is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
10414 floating-point aggregate or a homogeneous short-vector aggregate. */
10415
10416static bool
ef4bddc2 10417aarch64_vfp_is_call_or_return_candidate (machine_mode mode,
43e9d192 10418 const_tree type,
ef4bddc2 10419 machine_mode *base_mode,
43e9d192
IB
10420 int *count,
10421 bool *is_ha)
10422{
ef4bddc2 10423 machine_mode new_mode = VOIDmode;
43e9d192
IB
10424 bool composite_p = aarch64_composite_type_p (type, mode);
10425
10426 if (is_ha != NULL) *is_ha = false;
10427
10428 if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
10429 || aarch64_short_vector_p (type, mode))
10430 {
10431 *count = 1;
10432 new_mode = mode;
10433 }
10434 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10435 {
10436 if (is_ha != NULL) *is_ha = true;
10437 *count = 2;
10438 new_mode = GET_MODE_INNER (mode);
10439 }
10440 else if (type && composite_p)
10441 {
10442 int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
10443
10444 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
10445 {
10446 if (is_ha != NULL) *is_ha = true;
10447 *count = ag_count;
10448 }
10449 else
10450 return false;
10451 }
10452 else
10453 return false;
10454
10455 *base_mode = new_mode;
10456 return true;
10457}
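
/* Example classifications (illustrative): a scalar double gives *COUNT == 1
   with DFmode; _Complex double takes the MODE_COMPLEX_FLOAT branch, giving
   *COUNT == 2, *BASE_MODE == DFmode and *IS_HA set; a structure of three
   floats is a homogeneous aggregate with *COUNT == 3 and SFmode, provided
   the element count does not exceed HA_MAX_NUM_FLDS.  */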
10458
10459/* Implement TARGET_STRUCT_VALUE_RTX. */
10460
10461static rtx
10462aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
10463 int incoming ATTRIBUTE_UNUSED)
10464{
10465 return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
10466}
10467
10468/* Implements target hook vector_mode_supported_p. */
10469static bool
ef4bddc2 10470aarch64_vector_mode_supported_p (machine_mode mode)
43e9d192
IB
10471{
10472 if (TARGET_SIMD
10473 && (mode == V4SImode || mode == V8HImode
10474 || mode == V16QImode || mode == V2DImode
10475 || mode == V2SImode || mode == V4HImode
10476 || mode == V8QImode || mode == V2SFmode
ad7d90cc 10477 || mode == V4SFmode || mode == V2DFmode
71a11456 10478 || mode == V4HFmode || mode == V8HFmode
ad7d90cc 10479 || mode == V1DFmode))
43e9d192
IB
10480 return true;
10481
10482 return false;
10483}
10484
b7342d25
IB
10485/* Return appropriate SIMD container
10486 for MODE within a vector of WIDTH bits. */
ef4bddc2
RS
10487static machine_mode
10488aarch64_simd_container_mode (machine_mode mode, unsigned width)
43e9d192 10489{
b7342d25 10490 gcc_assert (width == 64 || width == 128);
43e9d192 10491 if (TARGET_SIMD)
b7342d25
IB
10492 {
10493 if (width == 128)
10494 switch (mode)
10495 {
10496 case DFmode:
10497 return V2DFmode;
10498 case SFmode:
10499 return V4SFmode;
10500 case SImode:
10501 return V4SImode;
10502 case HImode:
10503 return V8HImode;
10504 case QImode:
10505 return V16QImode;
10506 case DImode:
10507 return V2DImode;
10508 default:
10509 break;
10510 }
10511 else
10512 switch (mode)
10513 {
10514 case SFmode:
10515 return V2SFmode;
10516 case SImode:
10517 return V2SImode;
10518 case HImode:
10519 return V4HImode;
10520 case QImode:
10521 return V8QImode;
10522 default:
10523 break;
10524 }
10525 }
43e9d192
IB
10526 return word_mode;
10527}
10528
b7342d25 10529/* Return 128-bit container as the preferred SIMD mode for MODE. */
ef4bddc2
RS
10530static machine_mode
10531aarch64_preferred_simd_mode (machine_mode mode)
b7342d25
IB
10532{
10533 return aarch64_simd_container_mode (mode, 128);
10534}
10535
3b357264
JG
10536/* Return the bitmask of possible vector sizes for the vectorizer
10537 to iterate over. */
10538static unsigned int
10539aarch64_autovectorize_vector_sizes (void)
10540{
10541 return (16 | 8);
10542}
10543
ac2b960f
YZ
10544/* Implement TARGET_MANGLE_TYPE. */
10545
6f549691 10546static const char *
ac2b960f
YZ
10547aarch64_mangle_type (const_tree type)
10548{
10549 /* The AArch64 ABI documents say that "__va_list" has to be
10550 mangled as if it is in the "std" namespace. */
10551 if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
10552 return "St9__va_list";
10553
c2ec330c
AL
10554 /* Half-precision float. */
10555 if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) == 16)
10556 return "Dh";
10557
f9d53c27
TB
10558 /* Mangle AArch64-specific internal types. TYPE_NAME is non-NULL_TREE for
10559 builtin types. */
10560 if (TYPE_NAME (type) != NULL)
10561 return aarch64_mangle_builtin_type (type);
c6fc9e43 10562
ac2b960f
YZ
10563 /* Use the default mangling. */
10564 return NULL;
10565}
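
/* For illustration (hedged examples, not quoted from the ABI document):
   with the rules above, a C++ declaration "void f (__fp16);" would be
   expected to mangle as _Z1fDh, and "void g (va_list);" as
   _Z1gSt9__va_list, since __va_list is treated as if it were declared in
   namespace std.  */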
10566
8baff86e
KT
10567
10568/* Return true if the rtx_insn contains a MEM RTX somewhere
10569 in it. */
75cf1494
KT
10570
10571static bool
8baff86e 10572has_memory_op (rtx_insn *mem_insn)
75cf1494 10573{
8baff86e
KT
10574 subrtx_iterator::array_type array;
10575 FOR_EACH_SUBRTX (iter, array, PATTERN (mem_insn), ALL)
10576 if (MEM_P (*iter))
10577 return true;
10578
10579 return false;
75cf1494
KT
10580}
10581
10582/* Find the first rtx_insn before insn that will generate an assembly
10583 instruction. */
10584
10585static rtx_insn *
10586aarch64_prev_real_insn (rtx_insn *insn)
10587{
10588 if (!insn)
10589 return NULL;
10590
10591 do
10592 {
10593 insn = prev_real_insn (insn);
10594 }
10595 while (insn && recog_memoized (insn) < 0);
10596
10597 return insn;
10598}
10599
10600static bool
10601is_madd_op (enum attr_type t1)
10602{
10603 unsigned int i;
10604 /* A number of these may be AArch32 only. */
10605 enum attr_type mlatypes[] = {
10606 TYPE_MLA, TYPE_MLAS, TYPE_SMLAD, TYPE_SMLADX, TYPE_SMLAL, TYPE_SMLALD,
10607 TYPE_SMLALS, TYPE_SMLALXY, TYPE_SMLAWX, TYPE_SMLAWY, TYPE_SMLAXY,
10608 TYPE_SMMLA, TYPE_UMLAL, TYPE_UMLALS, TYPE_SMLSD, TYPE_SMLSDX, TYPE_SMLSLD
10609 };
10610
10611 for (i = 0; i < sizeof (mlatypes) / sizeof (enum attr_type); i++)
10612 {
10613 if (t1 == mlatypes[i])
10614 return true;
10615 }
10616
10617 return false;
10618}
10619
10620/* Check if there is a register dependency between a load and the insn
10621 for which we hold recog_data. */
10622
10623static bool
10624dep_between_memop_and_curr (rtx memop)
10625{
10626 rtx load_reg;
10627 int opno;
10628
8baff86e 10629 gcc_assert (GET_CODE (memop) == SET);
75cf1494
KT
10630
10631 if (!REG_P (SET_DEST (memop)))
10632 return false;
10633
10634 load_reg = SET_DEST (memop);
8baff86e 10635 for (opno = 1; opno < recog_data.n_operands; opno++)
75cf1494
KT
10636 {
10637 rtx operand = recog_data.operand[opno];
10638 if (REG_P (operand)
10639 && reg_overlap_mentioned_p (load_reg, operand))
10640 return true;
10641
10642 }
10643 return false;
10644}
10645
8baff86e
KT
10646
10647/* When working around the Cortex-A53 erratum 835769,
10648 given rtx_insn INSN, return true if it is a 64-bit multiply-accumulate
10649 instruction and has a preceding memory instruction such that a NOP
10650 should be inserted between them. */
10651
75cf1494
KT
10652bool
10653aarch64_madd_needs_nop (rtx_insn* insn)
10654{
10655 enum attr_type attr_type;
10656 rtx_insn *prev;
10657 rtx body;
10658
b32c1043 10659 if (!TARGET_FIX_ERR_A53_835769)
75cf1494
KT
10660 return false;
10661
e322d6e3 10662 if (!INSN_P (insn) || recog_memoized (insn) < 0)
75cf1494
KT
10663 return false;
10664
10665 attr_type = get_attr_type (insn);
10666 if (!is_madd_op (attr_type))
10667 return false;
10668
10669 prev = aarch64_prev_real_insn (insn);
3fea1a75
KT
10670 /* aarch64_prev_real_insn can call recog_memoized on insns other than INSN.
10671 Restore recog state to INSN to avoid state corruption. */
10672 extract_constrain_insn_cached (insn);
10673
8baff86e 10674 if (!prev || !has_memory_op (prev))
75cf1494
KT
10675 return false;
10676
10677 body = single_set (prev);
10678
10679 /* If the previous insn is a memory op and there is no dependency between
8baff86e
KT
10680 it and the DImode madd, emit a NOP between them. If body is NULL then we
10681 have a complex memory operation, probably a load/store pair.
10682 Be conservative for now and emit a NOP. */
10683 if (GET_MODE (recog_data.operand[0]) == DImode
10684 && (!body || !dep_between_memop_and_curr (body)))
75cf1494
KT
10685 return true;
10686
10687 return false;
10688
10689}
10690
8baff86e
KT
10691
10692/* Implement FINAL_PRESCAN_INSN. */
10693
75cf1494
KT
10694void
10695aarch64_final_prescan_insn (rtx_insn *insn)
10696{
10697 if (aarch64_madd_needs_nop (insn))
10698 fprintf (asm_out_file, "\tnop // between mem op and mult-accumulate\n");
10699}
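
/* Illustrative effect of the workaround above (a sketch; the exact insns
   depend on the surrounding code and require -mfix-cortex-a53-835769):
   where the compiler would otherwise emit

       ldr   x5, [x4]
       madd  x0, x1, x2, x3

   the prescan inserts a NOP between the memory access and the 64-bit
   multiply-accumulate:

       ldr   x5, [x4]
       nop // between mem op and mult-accumulate
       madd  x0, x1, x2, x3  */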
10700
10701
43e9d192 10702/* Return the equivalent letter for size. */
81c2dfb9 10703static char
43e9d192
IB
10704sizetochar (int size)
10705{
10706 switch (size)
10707 {
10708 case 64: return 'd';
10709 case 32: return 's';
10710 case 16: return 'h';
10711 case 8 : return 'b';
10712 default: gcc_unreachable ();
10713 }
10714}
10715
3520f7cc
JG
10716/* Return true iff x is a uniform vector of floating-point
10717 constants, and the constant can be represented in
10718 quarter-precision form. Note, as aarch64_float_const_representable_p
10719 rejects both +0.0 and -0.0, we will also reject them here. */
10720static bool
10721aarch64_vect_float_const_representable_p (rtx x)
10722{
92695fbb
RS
10723 rtx elt;
10724 return (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_FLOAT
10725 && const_vec_duplicate_p (x, &elt)
10726 && aarch64_float_const_representable_p (elt));
3520f7cc
JG
10727}
10728
d8edd899 10729/* Return true for valid and false for invalid. */
3ea63f60 10730bool
ef4bddc2 10731aarch64_simd_valid_immediate (rtx op, machine_mode mode, bool inverse,
48063b9d 10732 struct simd_immediate_info *info)
43e9d192
IB
10733{
10734#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
10735 matches = 1; \
10736 for (i = 0; i < idx; i += (STRIDE)) \
10737 if (!(TEST)) \
10738 matches = 0; \
10739 if (matches) \
10740 { \
10741 immtype = (CLASS); \
10742 elsize = (ELSIZE); \
43e9d192
IB
10743 eshift = (SHIFT); \
10744 emvn = (NEG); \
10745 break; \
10746 }
10747
10748 unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
cb5ca315 10749 unsigned int innersize = GET_MODE_UNIT_SIZE (mode);
43e9d192 10750 unsigned char bytes[16];
43e9d192
IB
10751 int immtype = -1, matches;
10752 unsigned int invmask = inverse ? 0xff : 0;
10753 int eshift, emvn;
10754
43e9d192 10755 if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3520f7cc 10756 {
81c2dfb9
IB
10757 if (! (aarch64_simd_imm_zero_p (op, mode)
10758 || aarch64_vect_float_const_representable_p (op)))
d8edd899 10759 return false;
3520f7cc 10760
48063b9d
IB
10761 if (info)
10762 {
10763 info->value = CONST_VECTOR_ELT (op, 0);
81c2dfb9 10764 info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value));
48063b9d
IB
10765 info->mvn = false;
10766 info->shift = 0;
10767 }
3520f7cc 10768
d8edd899 10769 return true;
3520f7cc 10770 }
43e9d192
IB
10771
10772 /* Splat vector constant out into a byte vector. */
10773 for (i = 0; i < n_elts; i++)
10774 {
4b1e108c
AL
10775 /* The vector is provided in gcc endian-neutral fashion. For aarch64_be,
10776 it must be laid out in the vector register in reverse order. */
10777 rtx el = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? (n_elts - 1 - i) : i);
43e9d192 10778 unsigned HOST_WIDE_INT elpart;
43e9d192 10779
ee78df47
KT
10780 gcc_assert (CONST_INT_P (el));
10781 elpart = INTVAL (el);
10782
10783 for (unsigned int byte = 0; byte < innersize; byte++)
10784 {
10785 bytes[idx++] = (elpart & 0xff) ^ invmask;
10786 elpart >>= BITS_PER_UNIT;
10787 }
43e9d192 10788
43e9d192
IB
10789 }
10790
10791 /* Sanity check. */
10792 gcc_assert (idx == GET_MODE_SIZE (mode));
10793
10794 do
10795 {
10796 CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
10797 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
10798
10799 CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
10800 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
10801
10802 CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
10803 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
10804
10805 CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
10806 && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
10807
10808 CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
10809
10810 CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
10811
10812 CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
10813 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
10814
10815 CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
10816 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
10817
10818 CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
10819 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
10820
10821 CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
10822 && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
10823
10824 CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
10825
10826 CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
10827
10828 CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
e4f0f84d 10829 && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
43e9d192
IB
10830
10831 CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
e4f0f84d 10832 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
43e9d192
IB
10833
10834 CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
e4f0f84d 10835 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
43e9d192
IB
10836
10837 CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
e4f0f84d 10838 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
43e9d192
IB
10839
10840 CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
10841
10842 CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
10843 && bytes[i] == bytes[(i + 8) % idx], 0, 0);
10844 }
10845 while (0);
10846
e4f0f84d 10847 if (immtype == -1)
d8edd899 10848 return false;
43e9d192 10849
48063b9d 10850 if (info)
43e9d192 10851 {
48063b9d 10852 info->element_width = elsize;
48063b9d
IB
10853 info->mvn = emvn != 0;
10854 info->shift = eshift;
10855
43e9d192
IB
10856 unsigned HOST_WIDE_INT imm = 0;
10857
e4f0f84d
TB
10858 if (immtype >= 12 && immtype <= 15)
10859 info->msl = true;
10860
43e9d192
IB
10861 /* Un-invert bytes of recognized vector, if necessary. */
10862 if (invmask != 0)
10863 for (i = 0; i < idx; i++)
10864 bytes[i] ^= invmask;
10865
10866 if (immtype == 17)
10867 {
10868 /* FIXME: Broken on 32-bit H_W_I hosts. */
10869 gcc_assert (sizeof (HOST_WIDE_INT) == 8);
10870
10871 for (i = 0; i < 8; i++)
10872 imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
10873 << (i * BITS_PER_UNIT);
10874
43e9d192 10875
48063b9d
IB
10876 info->value = GEN_INT (imm);
10877 }
10878 else
10879 {
10880 for (i = 0; i < elsize / BITS_PER_UNIT; i++)
10881 imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
43e9d192
IB
10882
10883 /* Construct 'abcdefgh' because the assembler cannot handle
48063b9d
IB
10884 generic constants. */
10885 if (info->mvn)
43e9d192 10886 imm = ~imm;
48063b9d
IB
10887 imm = (imm >> info->shift) & 0xff;
10888 info->value = GEN_INT (imm);
10889 }
43e9d192
IB
10890 }
10891
48063b9d 10892 return true;
43e9d192
IB
10893#undef CHECK
10894}
10895
43e9d192
IB
10896/* Check that immediate shift constants are within range. */
10897bool
ef4bddc2 10898aarch64_simd_shift_imm_p (rtx x, machine_mode mode, bool left)
43e9d192
IB
10899{
10900 int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
10901 if (left)
ddeabd3e 10902 return aarch64_const_vec_all_same_in_range_p (x, 0, bit_width - 1);
43e9d192 10903 else
ddeabd3e 10904 return aarch64_const_vec_all_same_in_range_p (x, 1, bit_width);
43e9d192
IB
10905}
10906
3520f7cc
JG
10907/* Return true if X is a uniform vector where all elements
10908 are either the floating-point constant 0.0 or the
10909 integer constant 0. */
43e9d192 10910bool
ef4bddc2 10911aarch64_simd_imm_zero_p (rtx x, machine_mode mode)
43e9d192 10912{
3520f7cc 10913 return x == CONST0_RTX (mode);
43e9d192
IB
10914}
10915
7325d85a
KT
10916
10917/* Return the bitmask CONST_INT to select the bits required by a zero extract
10918 operation of width WIDTH at bit position POS. */
10919
10920rtx
10921aarch64_mask_from_zextract_ops (rtx width, rtx pos)
10922{
10923 gcc_assert (CONST_INT_P (width));
10924 gcc_assert (CONST_INT_P (pos));
10925
10926 unsigned HOST_WIDE_INT mask
10927 = ((unsigned HOST_WIDE_INT) 1 << UINTVAL (width)) - 1;
10928 return GEN_INT (mask << UINTVAL (pos));
10929}
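
/* Worked example for the helper above: with WIDTH == 8 and POS == 16 the
   mask is ((1 << 8) - 1) << 16 == 0xff0000, i.e. it selects exactly the
   eight bits starting at bit position 16.  */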
10930
43e9d192 10931bool
ef4bddc2 10932aarch64_simd_imm_scalar_p (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
43e9d192
IB
10933{
10934 HOST_WIDE_INT imm = INTVAL (x);
10935 int i;
10936
10937 for (i = 0; i < 8; i++)
10938 {
10939 unsigned int byte = imm & 0xff;
10940 if (byte != 0xff && byte != 0)
10941 return false;
10942 imm >>= 8;
10943 }
10944
10945 return true;
10946}
10947
83f8c414 10948bool
a6e0bfa7 10949aarch64_mov_operand_p (rtx x, machine_mode mode)
83f8c414 10950{
83f8c414
CSS
10951 if (GET_CODE (x) == HIGH
10952 && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
10953 return true;
10954
82614948 10955 if (CONST_INT_P (x))
83f8c414
CSS
10956 return true;
10957
10958 if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x))
10959 return true;
10960
a6e0bfa7 10961 return aarch64_classify_symbolic_expression (x)
a5350ddc 10962 == SYMBOL_TINY_ABSOLUTE;
83f8c414
CSS
10963}
10964
43e9d192
IB
10965/* Return a const_int vector of VAL. */
10966rtx
ef4bddc2 10967aarch64_simd_gen_const_vector_dup (machine_mode mode, int val)
43e9d192
IB
10968{
10969 int nunits = GET_MODE_NUNITS (mode);
10970 rtvec v = rtvec_alloc (nunits);
10971 int i;
10972
10973 for (i = 0; i < nunits; i++)
10974 RTVEC_ELT (v, i) = GEN_INT (val);
10975
10976 return gen_rtx_CONST_VECTOR (mode, v);
10977}
10978
051d0e2f
SN
10979/* Check OP is a legal scalar immediate for the MOVI instruction. */
10980
10981bool
ef4bddc2 10982aarch64_simd_scalar_immediate_valid_for_move (rtx op, machine_mode mode)
051d0e2f 10983{
ef4bddc2 10984 machine_mode vmode;
051d0e2f
SN
10985
10986 gcc_assert (!VECTOR_MODE_P (mode));
10987 vmode = aarch64_preferred_simd_mode (mode);
10988 rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
48063b9d 10989 return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
051d0e2f
SN
10990}
10991
988fa693
JG
10992/* Construct and return a PARALLEL RTX vector with elements numbering the
10993 lanes of either the high (HIGH == TRUE) or low (HIGH == FALSE) half of
10994 the vector - from the perspective of the architecture. This does not
10995 line up with GCC's perspective on lane numbers, so we end up with
10996 different masks depending on our target endian-ness. The diagram
10997 below may help. We must draw the distinction when building masks
10998 which select one half of the vector. An instruction selecting
10999 architectural low-lanes for a big-endian target, must be described using
11000 a mask selecting GCC high-lanes.
11001
11002 Big-Endian Little-Endian
11003
11004GCC 0 1 2 3 3 2 1 0
11005 | x | x | x | x | | x | x | x | x |
11006Architecture 3 2 1 0 3 2 1 0
11007
11008Low Mask: { 2, 3 } { 0, 1 }
11009High Mask: { 0, 1 } { 2, 3 }
11010*/
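
/* Worked example for the diagram above: for V4SImode with HIGH == true,
   aarch64_simd_vect_par_cnst_half returns
   (parallel [(const_int 2) (const_int 3)]) on a little-endian target but
   (parallel [(const_int 0) (const_int 1)]) on a big-endian target,
   matching the "High Mask" row of the diagram.  */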
11011
43e9d192 11012rtx
ef4bddc2 11013aarch64_simd_vect_par_cnst_half (machine_mode mode, bool high)
43e9d192
IB
11014{
11015 int nunits = GET_MODE_NUNITS (mode);
11016 rtvec v = rtvec_alloc (nunits / 2);
988fa693
JG
11017 int high_base = nunits / 2;
11018 int low_base = 0;
11019 int base;
43e9d192
IB
11020 rtx t1;
11021 int i;
11022
988fa693
JG
11023 if (BYTES_BIG_ENDIAN)
11024 base = high ? low_base : high_base;
11025 else
11026 base = high ? high_base : low_base;
11027
11028 for (i = 0; i < nunits / 2; i++)
43e9d192
IB
11029 RTVEC_ELT (v, i) = GEN_INT (base + i);
11030
11031 t1 = gen_rtx_PARALLEL (mode, v);
11032 return t1;
11033}
11034
988fa693
JG
11035/* Check OP for validity as a PARALLEL RTX vector with elements
11036 numbering the lanes of either the high (HIGH == TRUE) or low lanes,
11037 from the perspective of the architecture. See the diagram above
11038 aarch64_simd_vect_par_cnst_half for more details. */
11039
11040bool
ef4bddc2 11041aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
988fa693
JG
11042 bool high)
11043{
11044 rtx ideal = aarch64_simd_vect_par_cnst_half (mode, high);
11045 HOST_WIDE_INT count_op = XVECLEN (op, 0);
11046 HOST_WIDE_INT count_ideal = XVECLEN (ideal, 0);
11047 int i = 0;
11048
11049 if (!VECTOR_MODE_P (mode))
11050 return false;
11051
11052 if (count_op != count_ideal)
11053 return false;
11054
11055 for (i = 0; i < count_ideal; i++)
11056 {
11057 rtx elt_op = XVECEXP (op, 0, i);
11058 rtx elt_ideal = XVECEXP (ideal, 0, i);
11059
4aa81c2e 11060 if (!CONST_INT_P (elt_op)
988fa693
JG
11061 || INTVAL (elt_ideal) != INTVAL (elt_op))
11062 return false;
11063 }
11064 return true;
11065}
11066
43e9d192
IB
11067/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
11068 HIGH (exclusive). */
11069void
46ed6024
CB
11070aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
11071 const_tree exp)
43e9d192
IB
11072{
11073 HOST_WIDE_INT lane;
4aa81c2e 11074 gcc_assert (CONST_INT_P (operand));
43e9d192
IB
11075 lane = INTVAL (operand);
11076
11077 if (lane < low || lane >= high)
46ed6024
CB
11078 {
11079 if (exp)
cf0c27ef 11080 error ("%Klane %wd out of range %wd - %wd", exp, lane, low, high - 1);
46ed6024 11081 else
cf0c27ef 11082 error ("lane %wd out of range %wd - %wd", lane, low, high - 1);
46ed6024 11083 }
43e9d192
IB
11084}
11085
43e9d192
IB
11086/* Return TRUE if OP is a valid vector addressing mode. */
11087bool
11088aarch64_simd_mem_operand_p (rtx op)
11089{
11090 return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
4aa81c2e 11091 || REG_P (XEXP (op, 0)));
43e9d192
IB
11092}
11093
2d8c6dc1
AH
11094/* Emit a register copy from operand to operand, taking care not to
11095 early-clobber source registers in the process.
43e9d192 11096
2d8c6dc1
AH
11097 COUNT is the number of components into which the copy needs to be
11098 decomposed. */
43e9d192 11099void
2d8c6dc1
AH
11100aarch64_simd_emit_reg_reg_move (rtx *operands, enum machine_mode mode,
11101 unsigned int count)
43e9d192
IB
11102{
11103 unsigned int i;
2d8c6dc1
AH
11104 int rdest = REGNO (operands[0]);
11105 int rsrc = REGNO (operands[1]);
43e9d192
IB
11106
11107 if (!reg_overlap_mentioned_p (operands[0], operands[1])
2d8c6dc1
AH
11108 || rdest < rsrc)
11109 for (i = 0; i < count; i++)
11110 emit_move_insn (gen_rtx_REG (mode, rdest + i),
11111 gen_rtx_REG (mode, rsrc + i));
43e9d192 11112 else
2d8c6dc1
AH
11113 for (i = 0; i < count; i++)
11114 emit_move_insn (gen_rtx_REG (mode, rdest + count - i - 1),
11115 gen_rtx_REG (mode, rsrc + count - i - 1));
43e9d192
IB
11116}
11117
668046d1 11118/* Compute and return the length of aarch64_simd_reglist<mode>, where <mode> is
6ec0e5b9 11119 one of VSTRUCT modes: OI, CI, or XI. */
668046d1
DS
11120int
11121aarch64_simd_attr_length_rglist (enum machine_mode mode)
11122{
11123 return (GET_MODE_SIZE (mode) / UNITS_PER_VREG) * 4;
11124}
11125
db0253a4
TB
11126/* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
11127 alignment of a vector to 128 bits. */
11128static HOST_WIDE_INT
11129aarch64_simd_vector_alignment (const_tree type)
11130{
9439e9a1 11131 HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
db0253a4
TB
11132 return MIN (align, 128);
11133}
11134
11135/* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
11136static bool
11137aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
11138{
11139 if (is_packed)
11140 return false;
11141
11142 /* We guarantee alignment for vectors up to 128 bits. */
11143 if (tree_int_cst_compare (TYPE_SIZE (type),
11144 bitsize_int (BIGGEST_ALIGNMENT)) > 0)
11145 return false;
11146
11147 /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
11148 return true;
11149}
11150
4369c11e
TB
11151/* If VALS is a vector constant that can be loaded into a register
11152 using DUP, generate instructions to do so and return an RTX to
11153 assign to the register. Otherwise return NULL_RTX. */
11154static rtx
11155aarch64_simd_dup_constant (rtx vals)
11156{
ef4bddc2
RS
11157 machine_mode mode = GET_MODE (vals);
11158 machine_mode inner_mode = GET_MODE_INNER (mode);
4369c11e 11159 rtx x;
4369c11e 11160
92695fbb 11161 if (!const_vec_duplicate_p (vals, &x))
4369c11e
TB
11162 return NULL_RTX;
11163
11164 /* We can load this constant by using DUP and a constant in a
11165 single ARM register. This will be cheaper than a vector
11166 load. */
92695fbb 11167 x = copy_to_mode_reg (inner_mode, x);
4369c11e
TB
11168 return gen_rtx_VEC_DUPLICATE (mode, x);
11169}
11170
11171
11172/* Generate code to load VALS, which is a PARALLEL containing only
11173 constants (for vec_init) or CONST_VECTOR, efficiently into a
11174 register. Returns an RTX to copy into the register, or NULL_RTX
11175 for a PARALLEL that can not be converted into a CONST_VECTOR. */
1df3f464 11176static rtx
4369c11e
TB
11177aarch64_simd_make_constant (rtx vals)
11178{
ef4bddc2 11179 machine_mode mode = GET_MODE (vals);
4369c11e
TB
11180 rtx const_dup;
11181 rtx const_vec = NULL_RTX;
11182 int n_elts = GET_MODE_NUNITS (mode);
11183 int n_const = 0;
11184 int i;
11185
11186 if (GET_CODE (vals) == CONST_VECTOR)
11187 const_vec = vals;
11188 else if (GET_CODE (vals) == PARALLEL)
11189 {
11190 /* A CONST_VECTOR must contain only CONST_INTs and
11191 CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
11192 Only store valid constants in a CONST_VECTOR. */
11193 for (i = 0; i < n_elts; ++i)
11194 {
11195 rtx x = XVECEXP (vals, 0, i);
11196 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
11197 n_const++;
11198 }
11199 if (n_const == n_elts)
11200 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
11201 }
11202 else
11203 gcc_unreachable ();
11204
11205 if (const_vec != NULL_RTX
48063b9d 11206 && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
4369c11e
TB
11207 /* Load using MOVI/MVNI. */
11208 return const_vec;
11209 else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
11210 /* Loaded using DUP. */
11211 return const_dup;
11212 else if (const_vec != NULL_RTX)
11213 /* Load from constant pool. We can not take advantage of single-cycle
11214 LD1 because we need a PC-relative addressing mode. */
11215 return const_vec;
11216 else
11217 /* A PARALLEL containing something not valid inside CONST_VECTOR.
11218 We can not construct an initializer. */
11219 return NULL_RTX;
11220}
11221
35a093b6
JG
11222/* Expand a vector initialisation sequence, such that TARGET is
11223 initialised to contain VALS. */
11224
4369c11e
TB
11225void
11226aarch64_expand_vector_init (rtx target, rtx vals)
11227{
ef4bddc2
RS
11228 machine_mode mode = GET_MODE (target);
11229 machine_mode inner_mode = GET_MODE_INNER (mode);
35a093b6 11230 /* The number of vector elements. */
4369c11e 11231 int n_elts = GET_MODE_NUNITS (mode);
35a093b6 11232 /* The number of vector elements which are not constant. */
8b66a2d4
AL
11233 int n_var = 0;
11234 rtx any_const = NULL_RTX;
35a093b6
JG
11235 /* The first element of vals. */
11236 rtx v0 = XVECEXP (vals, 0, 0);
4369c11e 11237 bool all_same = true;
4369c11e 11238
35a093b6 11239 /* Count the number of variable elements to initialise. */
8b66a2d4 11240 for (int i = 0; i < n_elts; ++i)
4369c11e 11241 {
8b66a2d4 11242 rtx x = XVECEXP (vals, 0, i);
35a093b6 11243 if (!(CONST_INT_P (x) || CONST_DOUBLE_P (x)))
8b66a2d4
AL
11244 ++n_var;
11245 else
11246 any_const = x;
4369c11e 11247
35a093b6 11248 all_same &= rtx_equal_p (x, v0);
4369c11e
TB
11249 }
11250
35a093b6
JG
11251 /* No variable elements, hand off to aarch64_simd_make_constant which knows
11252 how best to handle this. */
4369c11e
TB
11253 if (n_var == 0)
11254 {
11255 rtx constant = aarch64_simd_make_constant (vals);
11256 if (constant != NULL_RTX)
11257 {
11258 emit_move_insn (target, constant);
11259 return;
11260 }
11261 }
11262
11263 /* Splat a single non-constant element if we can. */
11264 if (all_same)
11265 {
35a093b6 11266 rtx x = copy_to_mode_reg (inner_mode, v0);
4369c11e
TB
11267 aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
11268 return;
11269 }
11270
35a093b6
JG
11271 /* Initialise a vector which is part-variable. We want to first try
11272 to build those lanes which are constant in the most efficient way we
11273 can. */
11274 if (n_var != n_elts)
4369c11e
TB
11275 {
11276 rtx copy = copy_rtx (vals);
4369c11e 11277
8b66a2d4
AL
11278 /* Load constant part of vector. We really don't care what goes into the
11279 parts we will overwrite, but we're more likely to be able to load the
11280 constant efficiently if it has fewer, larger, repeating parts
11281 (see aarch64_simd_valid_immediate). */
11282 for (int i = 0; i < n_elts; i++)
11283 {
11284 rtx x = XVECEXP (vals, 0, i);
11285 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
11286 continue;
11287 rtx subst = any_const;
11288 for (int bit = n_elts / 2; bit > 0; bit /= 2)
11289 {
11290 /* Look in the copied vector, as more elements are const. */
11291 rtx test = XVECEXP (copy, 0, i ^ bit);
11292 if (CONST_INT_P (test) || CONST_DOUBLE_P (test))
11293 {
11294 subst = test;
11295 break;
11296 }
11297 }
11298 XVECEXP (copy, 0, i) = subst;
11299 }
4369c11e 11300 aarch64_expand_vector_init (target, copy);
35a093b6 11301 }
4369c11e 11302
35a093b6 11303 /* Insert the variable lanes directly. */
8b66a2d4 11304
35a093b6
JG
11305 enum insn_code icode = optab_handler (vec_set_optab, mode);
11306 gcc_assert (icode != CODE_FOR_nothing);
4369c11e 11307
8b66a2d4 11308 for (int i = 0; i < n_elts; i++)
35a093b6
JG
11309 {
11310 rtx x = XVECEXP (vals, 0, i);
11311 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
11312 continue;
11313 x = copy_to_mode_reg (inner_mode, x);
11314 emit_insn (GEN_FCN (icode) (target, x, GEN_INT (i)));
11315 }
4369c11e
TB
11316}
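
/* Worked example for the strategy above (illustrative): for a V4SImode
   initialiser { x, 1, 2, 3 } where only x is variable, the constant
   vector { 2, 1, 2, 3 } is built first (the variable lane is temporarily
   filled with a nearby constant so that the constant part is cheaper to
   encode), and x is then inserted into lane 0 through the vec_set
   pattern.  */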
11317
43e9d192 11318static unsigned HOST_WIDE_INT
ef4bddc2 11319aarch64_shift_truncation_mask (machine_mode mode)
43e9d192
IB
11320{
11321 return
ac59ad4e
KT
11322 (!SHIFT_COUNT_TRUNCATED
11323 || aarch64_vector_mode_supported_p (mode)
43e9d192
IB
11324 || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
11325}
11326
43e9d192
IB
11327/* Select a format to encode pointers in exception handling data. */
11328int
11329aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
11330{
11331 int type;
11332 switch (aarch64_cmodel)
11333 {
11334 case AARCH64_CMODEL_TINY:
11335 case AARCH64_CMODEL_TINY_PIC:
11336 case AARCH64_CMODEL_SMALL:
11337 case AARCH64_CMODEL_SMALL_PIC:
1b1e81f8 11338 case AARCH64_CMODEL_SMALL_SPIC:
43e9d192
IB
11339 /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
11340 for everything. */
11341 type = DW_EH_PE_sdata4;
11342 break;
11343 default:
11344 /* No assumptions here. 8-byte relocs required. */
11345 type = DW_EH_PE_sdata8;
11346 break;
11347 }
11348 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
11349}
11350
e1c1ecb0
KT
11351/* The last .arch and .tune assembly strings that we printed. */
11352static std::string aarch64_last_printed_arch_string;
11353static std::string aarch64_last_printed_tune_string;
11354
361fb3ee
KT
11355/* Implement ASM_DECLARE_FUNCTION_NAME. Output the ISA features used
11356 by the function fndecl. */
11357
11358void
11359aarch64_declare_function_name (FILE *stream, const char* name,
11360 tree fndecl)
11361{
11362 tree target_parts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
11363
11364 struct cl_target_option *targ_options;
11365 if (target_parts)
11366 targ_options = TREE_TARGET_OPTION (target_parts);
11367 else
11368 targ_options = TREE_TARGET_OPTION (target_option_current_node);
11369 gcc_assert (targ_options);
11370
11371 const struct processor *this_arch
11372 = aarch64_get_arch (targ_options->x_explicit_arch);
11373
054b4005
JG
11374 unsigned long isa_flags = targ_options->x_aarch64_isa_flags;
11375 std::string extension
04a99ebe
JG
11376 = aarch64_get_extension_string_for_isa_flags (isa_flags,
11377 this_arch->flags);
e1c1ecb0
KT
11378 /* Only update the assembler .arch string if it is distinct from the last
11379 such string we printed. */
11380 std::string to_print = this_arch->name + extension;
11381 if (to_print != aarch64_last_printed_arch_string)
11382 {
11383 asm_fprintf (asm_out_file, "\t.arch %s\n", to_print.c_str ());
11384 aarch64_last_printed_arch_string = to_print;
11385 }
361fb3ee
KT
11386
11387 /* Print the cpu name we're tuning for in the comments; it might be
e1c1ecb0
KT
11388 useful to readers of the generated asm. Do it only when it changes
11389 from function to function and verbose assembly is requested. */
361fb3ee
KT
11390 const struct processor *this_tune
11391 = aarch64_get_tune_cpu (targ_options->x_explicit_tune_core);
11392
e1c1ecb0
KT
11393 if (flag_debug_asm && aarch64_last_printed_tune_string != this_tune->name)
11394 {
11395 asm_fprintf (asm_out_file, "\t" ASM_COMMENT_START ".tune %s\n",
11396 this_tune->name);
11397 aarch64_last_printed_tune_string = this_tune->name;
11398 }
361fb3ee
KT
11399
11400 /* Don't forget the type directive for ELF. */
11401 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
11402 ASM_OUTPUT_LABEL (stream, name);
11403}
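
/* Illustrative output of the routine above (an assumed typical invocation):
   compiling with -march=armv8-a+crc -mtune=cortex-a57 -fverbose-asm, the
   directives emitted would be expected to look like

       .arch armv8-a+crc
       //.tune cortex-a57

   with a new .arch line printed later only if a target attribute changes
   the ISA for a subsequent function.  */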
11404
e1c1ecb0
KT
11405/* Implements TARGET_ASM_FILE_START. Output the assembly header. */
11406
11407static void
11408aarch64_start_file (void)
11409{
11410 struct cl_target_option *default_options
11411 = TREE_TARGET_OPTION (target_option_default_node);
11412
11413 const struct processor *default_arch
11414 = aarch64_get_arch (default_options->x_explicit_arch);
11415 unsigned long default_isa_flags = default_options->x_aarch64_isa_flags;
11416 std::string extension
04a99ebe
JG
11417 = aarch64_get_extension_string_for_isa_flags (default_isa_flags,
11418 default_arch->flags);
e1c1ecb0
KT
11419
11420 aarch64_last_printed_arch_string = default_arch->name + extension;
11421 aarch64_last_printed_tune_string = "";
11422 asm_fprintf (asm_out_file, "\t.arch %s\n",
11423 aarch64_last_printed_arch_string.c_str ());
11424
11425 default_file_start ();
11426}
11427
0462169c
SN
11428/* Emit load exclusive. */
11429
11430static void
ef4bddc2 11431aarch64_emit_load_exclusive (machine_mode mode, rtx rval,
0462169c
SN
11432 rtx mem, rtx model_rtx)
11433{
11434 rtx (*gen) (rtx, rtx, rtx);
11435
11436 switch (mode)
11437 {
11438 case QImode: gen = gen_aarch64_load_exclusiveqi; break;
11439 case HImode: gen = gen_aarch64_load_exclusivehi; break;
11440 case SImode: gen = gen_aarch64_load_exclusivesi; break;
11441 case DImode: gen = gen_aarch64_load_exclusivedi; break;
11442 default:
11443 gcc_unreachable ();
11444 }
11445
11446 emit_insn (gen (rval, mem, model_rtx));
11447}
11448
11449/* Emit store exclusive. */
11450
11451static void
ef4bddc2 11452aarch64_emit_store_exclusive (machine_mode mode, rtx bval,
0462169c
SN
11453 rtx rval, rtx mem, rtx model_rtx)
11454{
11455 rtx (*gen) (rtx, rtx, rtx, rtx);
11456
11457 switch (mode)
11458 {
11459 case QImode: gen = gen_aarch64_store_exclusiveqi; break;
11460 case HImode: gen = gen_aarch64_store_exclusivehi; break;
11461 case SImode: gen = gen_aarch64_store_exclusivesi; break;
11462 case DImode: gen = gen_aarch64_store_exclusivedi; break;
11463 default:
11464 gcc_unreachable ();
11465 }
11466
11467 emit_insn (gen (bval, rval, mem, model_rtx));
11468}
11469
11470/* Mark the previous jump instruction as unlikely. */
11471
11472static void
11473aarch64_emit_unlikely_jump (rtx insn)
11474{
e5af9ddd 11475 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
0462169c
SN
11476
11477 insn = emit_jump_insn (insn);
e5af9ddd 11478 add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
0462169c
SN
11479}
11480
11481/* Expand a compare and swap pattern. */
11482
11483void
11484aarch64_expand_compare_and_swap (rtx operands[])
11485{
11486 rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
ef4bddc2 11487 machine_mode mode, cmp_mode;
b0770c0f
MW
11488 typedef rtx (*gen_cas_fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
11489 int idx;
11490 gen_cas_fn gen;
11491 const gen_cas_fn split_cas[] =
11492 {
11493 gen_aarch64_compare_and_swapqi,
11494 gen_aarch64_compare_and_swaphi,
11495 gen_aarch64_compare_and_swapsi,
11496 gen_aarch64_compare_and_swapdi
11497 };
11498 const gen_cas_fn atomic_cas[] =
11499 {
11500 gen_aarch64_compare_and_swapqi_lse,
11501 gen_aarch64_compare_and_swaphi_lse,
11502 gen_aarch64_compare_and_swapsi_lse,
11503 gen_aarch64_compare_and_swapdi_lse
11504 };
0462169c
SN
11505
11506 bval = operands[0];
11507 rval = operands[1];
11508 mem = operands[2];
11509 oldval = operands[3];
11510 newval = operands[4];
11511 is_weak = operands[5];
11512 mod_s = operands[6];
11513 mod_f = operands[7];
11514 mode = GET_MODE (mem);
11515 cmp_mode = mode;
11516
11517 /* Normally the succ memory model must be stronger than fail, but in the
11518 unlikely event of fail being ACQUIRE and succ being RELEASE we need to
11519 promote succ to ACQ_REL so that we don't lose the acquire semantics. */
11520
46b35980
AM
11521 if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
11522 && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
0462169c
SN
11523 mod_s = GEN_INT (MEMMODEL_ACQ_REL);
11524
11525 switch (mode)
11526 {
11527 case QImode:
11528 case HImode:
11529 /* For short modes, we're going to perform the comparison in SImode,
11530 so do the zero-extension now. */
11531 cmp_mode = SImode;
11532 rval = gen_reg_rtx (SImode);
11533 oldval = convert_modes (SImode, mode, oldval, true);
11534 /* Fall through. */
11535
11536 case SImode:
11537 case DImode:
11538 /* Force the value into a register if needed. */
11539 if (!aarch64_plus_operand (oldval, mode))
11540 oldval = force_reg (cmp_mode, oldval);
11541 break;
11542
11543 default:
11544 gcc_unreachable ();
11545 }
11546
11547 switch (mode)
11548 {
b0770c0f
MW
11549 case QImode: idx = 0; break;
11550 case HImode: idx = 1; break;
11551 case SImode: idx = 2; break;
11552 case DImode: idx = 3; break;
0462169c
SN
11553 default:
11554 gcc_unreachable ();
11555 }
b0770c0f
MW
11556 if (TARGET_LSE)
11557 gen = atomic_cas[idx];
11558 else
11559 gen = split_cas[idx];
0462169c
SN
11560
11561 emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
11562
11563 if (mode == QImode || mode == HImode)
11564 emit_move_insn (operands[1], gen_lowpart (mode, rval));
11565
11566 x = gen_rtx_REG (CCmode, CC_REGNUM);
11567 x = gen_rtx_EQ (SImode, x, const0_rtx);
f7df4a84 11568 emit_insn (gen_rtx_SET (bval, x));
0462169c
SN
11569}
11570
641c2f8b
MW
11571/* Test whether the target supports using an atomic load-operate instruction
11572 for operation CODE, i.e. whether the operation can be carried out with one
11573 of the LSE atomic instructions rather than with a load-exclusive/
11574 store-exclusive loop. Returns FALSE if the operation isn't supported by
11575 the architecture. */
11576
11577bool
11578aarch64_atomic_ldop_supported_p (enum rtx_code code)
11579{
11580 if (!TARGET_LSE)
11581 return false;
11582
11583 switch (code)
11584 {
11585 case SET:
11586 case AND:
11587 case IOR:
11588 case XOR:
11589 case MINUS:
11590 case PLUS:
11591 return true;
11592 default:
11593 return false;
11594 }
11595}
11596
f70fb3b6
MW
11597/* Emit a barrier that is appropriate for memory model MODEL at the end of a
11598 sequence implementing an atomic operation. */
11599
11600static void
11601aarch64_emit_post_barrier (enum memmodel model)
11602{
11603 const enum memmodel base_model = memmodel_base (model);
11604
11605 if (is_mm_sync (model)
11606 && (base_model == MEMMODEL_ACQUIRE
11607 || base_model == MEMMODEL_ACQ_REL
11608 || base_model == MEMMODEL_SEQ_CST))
11609 {
11610 emit_insn (gen_mem_thread_fence (GEN_INT (MEMMODEL_SEQ_CST)));
11611 }
11612}
11613
b0770c0f
MW
11614/* Emit an atomic compare-and-swap operation. RVAL is the destination register
11615 for the data in memory. EXPECTED is the value expected to be in memory.
11616 DESIRED is the value to store to memory. MEM is the memory location. MODEL
11617 is the memory ordering to use. */
11618
11619void
11620aarch64_gen_atomic_cas (rtx rval, rtx mem,
11621 rtx expected, rtx desired,
11622 rtx model)
11623{
11624 rtx (*gen) (rtx, rtx, rtx, rtx);
11625 machine_mode mode;
11626
11627 mode = GET_MODE (mem);
11628
11629 switch (mode)
11630 {
11631 case QImode: gen = gen_aarch64_atomic_casqi; break;
11632 case HImode: gen = gen_aarch64_atomic_cashi; break;
11633 case SImode: gen = gen_aarch64_atomic_cassi; break;
11634 case DImode: gen = gen_aarch64_atomic_casdi; break;
11635 default:
11636 gcc_unreachable ();
11637 }
11638
11639 /* Move the expected value into the CAS destination register. */
11640 emit_insn (gen_rtx_SET (rval, expected));
11641
11642 /* Emit the CAS. */
11643 emit_insn (gen (rval, mem, desired, model));
11644
11645 /* Compare the expected value with the value loaded by the CAS, to establish
11646 whether the swap was made. */
11647 aarch64_gen_compare_reg (EQ, rval, expected);
11648}
11649
0462169c
SN
11650/* Split a compare and swap pattern. */
11651
11652void
11653aarch64_split_compare_and_swap (rtx operands[])
11654{
11655 rtx rval, mem, oldval, newval, scratch;
ef4bddc2 11656 machine_mode mode;
0462169c 11657 bool is_weak;
5d8a22a5
DM
11658 rtx_code_label *label1, *label2;
11659 rtx x, cond;
ab876106
MW
11660 enum memmodel model;
11661 rtx model_rtx;
0462169c
SN
11662
11663 rval = operands[0];
11664 mem = operands[1];
11665 oldval = operands[2];
11666 newval = operands[3];
11667 is_weak = (operands[4] != const0_rtx);
ab876106 11668 model_rtx = operands[5];
0462169c
SN
11669 scratch = operands[7];
11670 mode = GET_MODE (mem);
ab876106 11671 model = memmodel_from_int (INTVAL (model_rtx));
0462169c 11672
5d8a22a5 11673 label1 = NULL;
0462169c
SN
11674 if (!is_weak)
11675 {
11676 label1 = gen_label_rtx ();
11677 emit_label (label1);
11678 }
11679 label2 = gen_label_rtx ();
11680
ab876106
MW
11681 /* The initial load can be relaxed for a __sync operation since a final
11682 barrier will be emitted to stop code hoisting. */
11683 if (is_mm_sync (model))
11684 aarch64_emit_load_exclusive (mode, rval, mem,
11685 GEN_INT (MEMMODEL_RELAXED));
11686 else
11687 aarch64_emit_load_exclusive (mode, rval, mem, model_rtx);
0462169c
SN
11688
11689 cond = aarch64_gen_compare_reg (NE, rval, oldval);
11690 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
11691 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
11692 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
f7df4a84 11693 aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
0462169c 11694
ab876106 11695 aarch64_emit_store_exclusive (mode, scratch, mem, newval, model_rtx);
0462169c
SN
11696
11697 if (!is_weak)
11698 {
11699 x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
11700 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
11701 gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
f7df4a84 11702 aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
0462169c
SN
11703 }
11704 else
11705 {
11706 cond = gen_rtx_REG (CCmode, CC_REGNUM);
11707 x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
f7df4a84 11708 emit_insn (gen_rtx_SET (cond, x));
0462169c
SN
11709 }
11710
11711 emit_label (label2);
ab876106
MW
11712
11713 /* Emit any final barrier needed for a __sync operation. */
11714 if (is_mm_sync (model))
11715 aarch64_emit_post_barrier (model);
0462169c
SN
11716}
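
/* For reference, a sketch of the code typically produced by the split
   above when LSE is not available (the exact load/store variants depend
   on the memory model and on whether the CAS is weak):

       .L1: ldaxr  x0, [x1]        // load-exclusive the current value
            cmp    x0, x2          // compare with the expected value
            bne    .L2
            stlxr  w3, x4, [x1]    // store-exclusive the new value
            cbnz   w3, .L1         // retry if the exclusive store failed
       .L2:

   This is only an illustration; the actual instructions come from the
   machine description's atomic patterns.  */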
11717
68729b06
MW
11718/* Emit a BIC instruction. */
11719
11720static void
11721aarch64_emit_bic (machine_mode mode, rtx dst, rtx s1, rtx s2, int shift)
11722{
11723 rtx shift_rtx = GEN_INT (shift);
11724 rtx (*gen) (rtx, rtx, rtx, rtx);
11725
11726 switch (mode)
11727 {
11728 case SImode: gen = gen_and_one_cmpl_lshrsi3; break;
11729 case DImode: gen = gen_and_one_cmpl_lshrdi3; break;
11730 default:
11731 gcc_unreachable ();
11732 }
11733
11734 emit_insn (gen (dst, s2, shift_rtx, s1));
11735}
11736
9cd7b720
MW
11737/* Emit an atomic swap. */
11738
11739static void
11740aarch64_emit_atomic_swap (machine_mode mode, rtx dst, rtx value,
11741 rtx mem, rtx model)
11742{
11743 rtx (*gen) (rtx, rtx, rtx, rtx);
11744
11745 switch (mode)
11746 {
11747 case QImode: gen = gen_aarch64_atomic_swpqi; break;
11748 case HImode: gen = gen_aarch64_atomic_swphi; break;
11749 case SImode: gen = gen_aarch64_atomic_swpsi; break;
11750 case DImode: gen = gen_aarch64_atomic_swpdi; break;
11751 default:
11752 gcc_unreachable ();
11753 }
11754
11755 emit_insn (gen (dst, mem, value, model));
11756}
11757
641c2f8b
MW
11758/* Operations supported by aarch64_emit_atomic_load_op. */
11759
11760enum aarch64_atomic_load_op_code
11761{
11762 AARCH64_LDOP_PLUS, /* A + B */
11763 AARCH64_LDOP_XOR, /* A ^ B */
11764 AARCH64_LDOP_OR, /* A | B */
11765 AARCH64_LDOP_BIC /* A & ~B */
11766};
11767
11768/* Emit an atomic load-operate. */
11769
11770static void
11771aarch64_emit_atomic_load_op (enum aarch64_atomic_load_op_code code,
11772 machine_mode mode, rtx dst, rtx src,
11773 rtx mem, rtx model)
11774{
11775 typedef rtx (*aarch64_atomic_load_op_fn) (rtx, rtx, rtx, rtx);
11776 const aarch64_atomic_load_op_fn plus[] =
11777 {
11778 gen_aarch64_atomic_loadaddqi,
11779 gen_aarch64_atomic_loadaddhi,
11780 gen_aarch64_atomic_loadaddsi,
11781 gen_aarch64_atomic_loadadddi
11782 };
11783 const aarch64_atomic_load_op_fn eor[] =
11784 {
11785 gen_aarch64_atomic_loadeorqi,
11786 gen_aarch64_atomic_loadeorhi,
11787 gen_aarch64_atomic_loadeorsi,
11788 gen_aarch64_atomic_loadeordi
11789 };
11790 const aarch64_atomic_load_op_fn ior[] =
11791 {
11792 gen_aarch64_atomic_loadsetqi,
11793 gen_aarch64_atomic_loadsethi,
11794 gen_aarch64_atomic_loadsetsi,
11795 gen_aarch64_atomic_loadsetdi
11796 };
11797 const aarch64_atomic_load_op_fn bic[] =
11798 {
11799 gen_aarch64_atomic_loadclrqi,
11800 gen_aarch64_atomic_loadclrhi,
11801 gen_aarch64_atomic_loadclrsi,
11802 gen_aarch64_atomic_loadclrdi
11803 };
11804 aarch64_atomic_load_op_fn gen;
11805 int idx = 0;
11806
11807 switch (mode)
11808 {
11809 case QImode: idx = 0; break;
11810 case HImode: idx = 1; break;
11811 case SImode: idx = 2; break;
11812 case DImode: idx = 3; break;
11813 default:
11814 gcc_unreachable ();
11815 }
11816
11817 switch (code)
11818 {
11819 case AARCH64_LDOP_PLUS: gen = plus[idx]; break;
11820 case AARCH64_LDOP_XOR: gen = eor[idx]; break;
11821 case AARCH64_LDOP_OR: gen = ior[idx]; break;
11822 case AARCH64_LDOP_BIC: gen = bic[idx]; break;
11823 default:
11824 gcc_unreachable ();
11825 }
11826
11827 emit_insn (gen (dst, mem, src, model));
11828}
11829
11830/* Emit an atomic load+operate. CODE is the operation. OUT_DATA is the
68729b06
MW
11831 location to store the data read from memory. OUT_RESULT is the location to
11832 store the result of the operation. MEM is the memory location to read and
11833 modify. MODEL_RTX is the memory ordering to use. VALUE is the second
11834 operand for the operation. Either OUT_DATA or OUT_RESULT, but not both, can
11835 be NULL. */
9cd7b720
MW
11836
11837void
68729b06 11838aarch64_gen_atomic_ldop (enum rtx_code code, rtx out_data, rtx out_result,
9cd7b720
MW
11839 rtx mem, rtx value, rtx model_rtx)
11840{
11841 machine_mode mode = GET_MODE (mem);
641c2f8b
MW
11842 machine_mode wmode = (mode == DImode ? DImode : SImode);
11843 const bool short_mode = (mode < SImode);
11844 aarch64_atomic_load_op_code ldop_code;
11845 rtx src;
11846 rtx x;
11847
11848 if (out_data)
11849 out_data = gen_lowpart (mode, out_data);
9cd7b720 11850
68729b06
MW
11851 if (out_result)
11852 out_result = gen_lowpart (mode, out_result);
11853
641c2f8b
MW
11854 /* Make sure the value is in a register, putting it into a destination
11855 register if it needs to be manipulated. */
11856 if (!register_operand (value, mode)
11857 || code == AND || code == MINUS)
11858 {
68729b06 11859 src = out_result ? out_result : out_data;
641c2f8b
MW
11860 emit_move_insn (src, gen_lowpart (mode, value));
11861 }
11862 else
11863 src = value;
11864 gcc_assert (register_operand (src, mode));
9cd7b720 11865
641c2f8b
MW
11866 /* Preprocess the data for the operation as necessary. If the operation is
11867 a SET then emit a swap instruction and finish. */
9cd7b720
MW
11868 switch (code)
11869 {
11870 case SET:
641c2f8b 11871 aarch64_emit_atomic_swap (mode, out_data, src, mem, model_rtx);
9cd7b720
MW
11872 return;
11873
641c2f8b
MW
11874 case MINUS:
11875 /* Negate the value and treat it as a PLUS. */
11876 {
11877 rtx neg_src;
11878
11879 /* Resize the value if necessary. */
11880 if (short_mode)
11881 src = gen_lowpart (wmode, src);
11882
11883 neg_src = gen_rtx_NEG (wmode, src);
11884 emit_insn (gen_rtx_SET (src, neg_src));
11885
11886 if (short_mode)
11887 src = gen_lowpart (mode, src);
11888 }
11889 /* Fall-through. */
11890 case PLUS:
11891 ldop_code = AARCH64_LDOP_PLUS;
11892 break;
11893
11894 case IOR:
11895 ldop_code = AARCH64_LDOP_OR;
11896 break;
11897
11898 case XOR:
11899 ldop_code = AARCH64_LDOP_XOR;
11900 break;
11901
11902 case AND:
11903 {
11904 rtx not_src;
11905
11906 /* Resize the value if necessary. */
11907 if (short_mode)
11908 src = gen_lowpart (wmode, src);
11909
11910 not_src = gen_rtx_NOT (wmode, src);
11911 emit_insn (gen_rtx_SET (src, not_src));
11912
11913 if (short_mode)
11914 src = gen_lowpart (mode, src);
11915 }
11916 ldop_code = AARCH64_LDOP_BIC;
11917 break;
11918
9cd7b720
MW
11919 default:
11920 /* The operation can't be done with atomic instructions. */
11921 gcc_unreachable ();
11922 }
641c2f8b
MW
11923
11924 aarch64_emit_atomic_load_op (ldop_code, mode, out_data, src, mem, model_rtx);
68729b06
MW
11925
11926 /* If necessary, calculate the data in memory after the update by redoing the
11927 operation from values in registers. */
11928 if (!out_result)
11929 return;
11930
11931 if (short_mode)
11932 {
11933 src = gen_lowpart (wmode, src);
11934 out_data = gen_lowpart (wmode, out_data);
11935 out_result = gen_lowpart (wmode, out_result);
11936 }
11937
11938 x = NULL_RTX;
11939
11940 switch (code)
11941 {
11942 case MINUS:
11943 case PLUS:
11944 x = gen_rtx_PLUS (wmode, out_data, src);
11945 break;
11946 case IOR:
11947 x = gen_rtx_IOR (wmode, out_data, src);
11948 break;
11949 case XOR:
11950 x = gen_rtx_XOR (wmode, out_data, src);
11951 break;
11952 case AND:
11953 aarch64_emit_bic (wmode, out_result, out_data, src, 0);
11954 return;
11955 default:
11956 gcc_unreachable ();
11957 }
11958
11959 emit_set_insn (out_result, x);
11960
11961 return;
9cd7b720
MW
11962}
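
/* Illustrative mapping used above (a sketch, assuming TARGET_LSE): a
   32-bit __atomic_fetch_sub is handled by negating the operand and
   emitting an LDADD-style pattern, roughly

       neg    w1, w1
       ldadd  w1, w0, [x2]

   while an atomic AND complements the operand and uses the LDCLR-style
   pattern, which computes A & ~B in memory.  The exact mnemonic carries
   acquire/release suffixes chosen from the memory model.  */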
11963
0462169c
SN
11964/* Split an atomic operation. */
11965
11966void
11967aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
9cd7b720 11968 rtx value, rtx model_rtx, rtx cond)
0462169c 11969{
ef4bddc2
RS
11970 machine_mode mode = GET_MODE (mem);
11971 machine_mode wmode = (mode == DImode ? DImode : SImode);
f70fb3b6
MW
11972 const enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
11973 const bool is_sync = is_mm_sync (model);
5d8a22a5
DM
11974 rtx_code_label *label;
11975 rtx x;
0462169c 11976
9cd7b720 11977 /* Split the atomic operation into a sequence. */
0462169c
SN
11978 label = gen_label_rtx ();
11979 emit_label (label);
11980
11981 if (new_out)
11982 new_out = gen_lowpart (wmode, new_out);
11983 if (old_out)
11984 old_out = gen_lowpart (wmode, old_out);
11985 else
11986 old_out = new_out;
11987 value = simplify_gen_subreg (wmode, value, mode, 0);
11988
f70fb3b6
MW
11989 /* The initial load can be relaxed for a __sync operation since a final
11990 barrier will be emitted to stop code hoisting. */
11991 if (is_sync)
11992 aarch64_emit_load_exclusive (mode, old_out, mem,
11993 GEN_INT (MEMMODEL_RELAXED));
11994 else
11995 aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
0462169c
SN
11996
11997 switch (code)
11998 {
11999 case SET:
12000 new_out = value;
12001 break;
12002
12003 case NOT:
12004 x = gen_rtx_AND (wmode, old_out, value);
f7df4a84 12005 emit_insn (gen_rtx_SET (new_out, x));
0462169c 12006 x = gen_rtx_NOT (wmode, new_out);
f7df4a84 12007 emit_insn (gen_rtx_SET (new_out, x));
0462169c
SN
12008 break;
12009
12010 case MINUS:
12011 if (CONST_INT_P (value))
12012 {
12013 value = GEN_INT (-INTVAL (value));
12014 code = PLUS;
12015 }
12016 /* Fall through. */
12017
12018 default:
12019 x = gen_rtx_fmt_ee (code, wmode, old_out, value);
f7df4a84 12020 emit_insn (gen_rtx_SET (new_out, x));
0462169c
SN
12021 break;
12022 }
12023
12024 aarch64_emit_store_exclusive (mode, cond, mem,
12025 gen_lowpart (mode, new_out), model_rtx);
12026
12027 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
12028 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
12029 gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
f7df4a84 12030 aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
f70fb3b6
MW
12031
12032 /* Emit any final barrier needed for a __sync operation. */
12033 if (is_sync)
12034 aarch64_emit_post_barrier (model);
0462169c
SN
12035}
12036
c2ec330c
AL
12037static void
12038aarch64_init_libfuncs (void)
12039{
12040 /* Half-precision float operations. The compiler handles all operations
12041 with NULL libfuncs by converting to SFmode. */
12042
12043 /* Conversions. */
12044 set_conv_libfunc (trunc_optab, HFmode, SFmode, "__gnu_f2h_ieee");
12045 set_conv_libfunc (sext_optab, SFmode, HFmode, "__gnu_h2f_ieee");
12046
12047 /* Arithmetic. */
12048 set_optab_libfunc (add_optab, HFmode, NULL);
12049 set_optab_libfunc (sdiv_optab, HFmode, NULL);
12050 set_optab_libfunc (smul_optab, HFmode, NULL);
12051 set_optab_libfunc (neg_optab, HFmode, NULL);
12052 set_optab_libfunc (sub_optab, HFmode, NULL);
12053
12054 /* Comparisons. */
12055 set_optab_libfunc (eq_optab, HFmode, NULL);
12056 set_optab_libfunc (ne_optab, HFmode, NULL);
12057 set_optab_libfunc (lt_optab, HFmode, NULL);
12058 set_optab_libfunc (le_optab, HFmode, NULL);
12059 set_optab_libfunc (ge_optab, HFmode, NULL);
12060 set_optab_libfunc (gt_optab, HFmode, NULL);
12061 set_optab_libfunc (unord_optab, HFmode, NULL);
12062}
12063
43e9d192 12064/* Target hook for c_mode_for_suffix. */
ef4bddc2 12065static machine_mode
43e9d192
IB
12066aarch64_c_mode_for_suffix (char suffix)
12067{
12068 if (suffix == 'q')
12069 return TFmode;
12070
12071 return VOIDmode;
12072}
12073
3520f7cc
JG
12074/* We can only represent floating point constants which will fit in
12075 "quarter-precision" values. These values are characterised by
12076 a sign bit, a 4-bit mantissa and a 3-bit exponent. And are given
12077 by:
12078
12079 (-1)^s * (n/16) * 2^r
12080
12081 Where:
12082 's' is the sign bit.
12083 'n' is an integer in the range 16 <= n <= 31.
12084 'r' is an integer in the range -3 <= r <= 4. */
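
/* Worked examples (illustrative): 0.25 is representable, since
   0.25 = (-1)^0 * (16/16) * 2^-2 with n = 16 and r = -2; so is
   1.5 = (24/16) * 2^0.  A value such as 0.1 is not, because no n in
   [16, 31] and r in [-3, 4] satisfy n/16 * 2^r == 0.1.  */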
12085
12086/* Return true iff X can be represented by a quarter-precision
12087 floating point immediate operand X. Note, we cannot represent 0.0. */
12088bool
12089aarch64_float_const_representable_p (rtx x)
12090{
12091 /* This represents our current view of how many bits
12092 make up the mantissa. */
12093 int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
ba96cdfb 12094 int exponent;
3520f7cc 12095 unsigned HOST_WIDE_INT mantissa, mask;
3520f7cc 12096 REAL_VALUE_TYPE r, m;
807e902e 12097 bool fail;
3520f7cc
JG
12098
12099 if (!CONST_DOUBLE_P (x))
12100 return false;
12101
c2ec330c
AL
12102 /* We don't support HFmode constants yet. */
12103 if (GET_MODE (x) == VOIDmode || GET_MODE (x) == HFmode)
94bfa2da
TV
12104 return false;
12105
34a72c33 12106 r = *CONST_DOUBLE_REAL_VALUE (x);
3520f7cc
JG
12107
12108 /* We cannot represent infinities, NaNs or +/-zero. We won't
12109 know if we have +zero until we analyse the mantissa, but we
12110 can reject the other invalid values. */
12111 if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r)
12112 || REAL_VALUE_MINUS_ZERO (r))
12113 return false;
12114
ba96cdfb 12115 /* Extract exponent. */
3520f7cc
JG
12116 r = real_value_abs (&r);
12117 exponent = REAL_EXP (&r);
12118
12119 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
12120 highest (sign) bit, with a fixed binary point at bit point_pos.
12121 w.elt (0) holds the low part of the mantissa, w.elt (1) the high part.
12122 WARNING: If we ever have a representation using more than 2 * H_W_I - 1
12123 bits for the mantissa, this can fail (low bits will be lost). */
12124 real_ldexp (&m, &r, point_pos - exponent);
807e902e 12125 wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
3520f7cc
JG
12126
12127 /* If the low part of the mantissa has bits set we cannot represent
12128 the value. */
807e902e 12129 if (w.elt (0) != 0)
3520f7cc
JG
12130 return false;
12131 /* We have rejected the lower HOST_WIDE_INT, so update our
12132 understanding of how many bits lie in the mantissa and
12133 look only at the high HOST_WIDE_INT. */
807e902e 12134 mantissa = w.elt (1);
3520f7cc
JG
12135 point_pos -= HOST_BITS_PER_WIDE_INT;
12136
12137 /* We can only represent values with a mantissa of the form 1.xxxx. */
12138 mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
12139 if ((mantissa & mask) != 0)
12140 return false;
12141
12142 /* Having filtered unrepresentable values, we may now remove all
12143 but the highest 5 bits. */
12144 mantissa >>= point_pos - 5;
12145
12146 /* We cannot represent the value 0.0, so reject it. This is handled
12147 elsewhere. */
12148 if (mantissa == 0)
12149 return false;
12150
12151 /* Then, as bit 4 is always set, we can mask it off, leaving
12152 the mantissa in the range [0, 15]. */
12153 mantissa &= ~(1 << 4);
12154 gcc_assert (mantissa <= 15);
12155
12156 /* GCC internally does not use IEEE754-like encoding (where normalized
12157 significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
12158 Our mantissa values are shifted 4 places to the left relative to
12159 normalized IEEE754 so we must modify the exponent returned by REAL_EXP
12160 by 5 places to correct for GCC's representation. */
12161 exponent = 5 - exponent;
12162
12163 return (exponent >= 0 && exponent <= 7);
12164}
12165
12166char*
81c2dfb9 12167aarch64_output_simd_mov_immediate (rtx const_vector,
ef4bddc2 12168 machine_mode mode,
3520f7cc
JG
12169 unsigned width)
12170{
3ea63f60 12171 bool is_valid;
3520f7cc 12172 static char templ[40];
3520f7cc 12173 const char *mnemonic;
e4f0f84d 12174 const char *shift_op;
3520f7cc 12175 unsigned int lane_count = 0;
81c2dfb9 12176 char element_char;
3520f7cc 12177
e4f0f84d 12178 struct simd_immediate_info info = { NULL_RTX, 0, 0, false, false };
48063b9d
IB
12179
12180 /* This will return true to show const_vector is legal for use as an
12181 AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate. It will
12182 also update INFO to show how the immediate should be generated. */
81c2dfb9 12183 is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info);
3520f7cc
JG
12184 gcc_assert (is_valid);
12185
81c2dfb9 12186 element_char = sizetochar (info.element_width);
48063b9d
IB
12187 lane_count = width / info.element_width;
12188
3520f7cc 12189 mode = GET_MODE_INNER (mode);
0d8e1702 12190 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3520f7cc 12191 {
48063b9d 12192 gcc_assert (info.shift == 0 && ! info.mvn);
0d8e1702
KT
12193 /* For FP zero change it to a CONST_INT 0 and use the integer SIMD
12194 move immediate path. */
48063b9d
IB
12195 if (aarch64_float_const_zero_rtx_p (info.value))
12196 info.value = GEN_INT (0);
12197 else
12198 {
83faf7d0 12199 const unsigned int buf_size = 20;
48063b9d 12200 char float_buf[buf_size] = {'\0'};
34a72c33
RS
12201 real_to_decimal_for_mode (float_buf,
12202 CONST_DOUBLE_REAL_VALUE (info.value),
12203 buf_size, buf_size, 1, mode);
48063b9d
IB
12204
12205 if (lane_count == 1)
12206 snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
12207 else
12208 snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
81c2dfb9 12209 lane_count, element_char, float_buf);
48063b9d
IB
12210 return templ;
12211 }
3520f7cc 12212 }
3520f7cc 12213
48063b9d 12214 mnemonic = info.mvn ? "mvni" : "movi";
e4f0f84d 12215 shift_op = info.msl ? "msl" : "lsl";
3520f7cc 12216
0d8e1702 12217 gcc_assert (CONST_INT_P (info.value));
3520f7cc 12218 if (lane_count == 1)
48063b9d
IB
12219 snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
12220 mnemonic, UINTVAL (info.value));
12221 else if (info.shift)
12222 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
e4f0f84d
TB
12223 ", %s %d", mnemonic, lane_count, element_char,
12224 UINTVAL (info.value), shift_op, info.shift);
3520f7cc 12225 else
48063b9d 12226 snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
81c2dfb9 12227 mnemonic, lane_count, element_char, UINTVAL (info.value));
3520f7cc
JG
12228 return templ;
12229}
12230
b7342d25
IB
12231char*
12232aarch64_output_scalar_simd_mov_immediate (rtx immediate,
ef4bddc2 12233 machine_mode mode)
b7342d25 12234{
ef4bddc2 12235 machine_mode vmode;
b7342d25
IB
12236
12237 gcc_assert (!VECTOR_MODE_P (mode));
12238 vmode = aarch64_simd_container_mode (mode, 64);
12239 rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
12240 return aarch64_output_simd_mov_immediate (v_op, vmode, 64);
12241}
12242
88b08073
JG
12243/* Split operands into moves from op[1] + op[2] into op[0]. */
12244
12245void
12246aarch64_split_combinev16qi (rtx operands[3])
12247{
12248 unsigned int dest = REGNO (operands[0]);
12249 unsigned int src1 = REGNO (operands[1]);
12250 unsigned int src2 = REGNO (operands[2]);
ef4bddc2 12251 machine_mode halfmode = GET_MODE (operands[1]);
88b08073
JG
12252 unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode);
12253 rtx destlo, desthi;
12254
12255 gcc_assert (halfmode == V16QImode);
12256
12257 if (src1 == dest && src2 == dest + halfregs)
12258 {
12259 /* No-op move. Can't split to nothing; emit something. */
12260 emit_note (NOTE_INSN_DELETED);
12261 return;
12262 }
12263
12264 /* Preserve register attributes for variable tracking. */
12265 destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
12266 desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
12267 GET_MODE_SIZE (halfmode));
12268
12269 /* Special case of reversed high/low parts. */
12270 if (reg_overlap_mentioned_p (operands[2], destlo)
12271 && reg_overlap_mentioned_p (operands[1], desthi))
12272 {
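      /* Swap the two halves in place using the classic three-XOR exchange
	 (a ^= b; b ^= a; a ^= b), which avoids the need for a scratch
	 register.  */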
12273 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
12274 emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2]));
12275 emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
12276 }
12277 else if (!reg_overlap_mentioned_p (operands[2], destlo))
12278 {
12279 /* Try to avoid unnecessary moves if part of the result
12280 is in the right place already. */
12281 if (src1 != dest)
12282 emit_move_insn (destlo, operands[1]);
12283 if (src2 != dest + halfregs)
12284 emit_move_insn (desthi, operands[2]);
12285 }
12286 else
12287 {
12288 if (src2 != dest + halfregs)
12289 emit_move_insn (desthi, operands[2]);
12290 if (src1 != dest)
12291 emit_move_insn (destlo, operands[1]);
12292 }
12293}
12294
12295/* vec_perm support. */
12296
12297#define MAX_VECT_LEN 16
12298
12299struct expand_vec_perm_d
12300{
12301 rtx target, op0, op1;
12302 unsigned char perm[MAX_VECT_LEN];
ef4bddc2 12303 machine_mode vmode;
88b08073
JG
12304 unsigned char nelt;
12305 bool one_vector_p;
12306 bool testing_p;
12307};
12308
12309/* Generate a variable permutation. */
12310
12311static void
12312aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
12313{
ef4bddc2 12314 machine_mode vmode = GET_MODE (target);
88b08073
JG
12315 bool one_vector_p = rtx_equal_p (op0, op1);
12316
12317 gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
12318 gcc_checking_assert (GET_MODE (op0) == vmode);
12319 gcc_checking_assert (GET_MODE (op1) == vmode);
12320 gcc_checking_assert (GET_MODE (sel) == vmode);
12321 gcc_checking_assert (TARGET_SIMD);
12322
12323 if (one_vector_p)
12324 {
12325 if (vmode == V8QImode)
12326 {
12327 /* Expand the argument to a V16QI mode by duplicating it. */
12328 rtx pair = gen_reg_rtx (V16QImode);
12329 emit_insn (gen_aarch64_combinev8qi (pair, op0, op0));
12330 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
12331 }
12332 else
12333 {
12334 emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel));
12335 }
12336 }
12337 else
12338 {
12339 rtx pair;
12340
12341 if (vmode == V8QImode)
12342 {
12343 pair = gen_reg_rtx (V16QImode);
12344 emit_insn (gen_aarch64_combinev8qi (pair, op0, op1));
12345 emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
12346 }
12347 else
12348 {
12349 pair = gen_reg_rtx (OImode);
12350 emit_insn (gen_aarch64_combinev16qi (pair, op0, op1));
12351 emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel));
12352 }
12353 }
12354}
12355
12356void
12357aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
12358{
ef4bddc2 12359 machine_mode vmode = GET_MODE (target);
c9d1a16a 12360 unsigned int nelt = GET_MODE_NUNITS (vmode);
88b08073 12361 bool one_vector_p = rtx_equal_p (op0, op1);
f7c4e5b8 12362 rtx mask;
88b08073
JG
12363
12364 /* The TBL instruction does not use a modulo index, so we must take care
12365 of that ourselves. */
f7c4e5b8
AL
12366 mask = aarch64_simd_gen_const_vector_dup (vmode,
12367 one_vector_p ? nelt - 1 : 2 * nelt - 1);
88b08073
JG
12368 sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
12369
f7c4e5b8
AL
12370 /* For big-endian, we also need to reverse the index within the vector
12371 (but not which vector). */
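      /* For example, with two V16QImode inputs, selector index 17 (lane 1 of
	 the second vector) becomes 17 ^ 15 = 30 (lane 14 of the second
	 vector): the lane is mirrored, but bit 4, which chooses between the
	 two vectors, is preserved.  */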
12372 if (BYTES_BIG_ENDIAN)
12373 {
12374 /* If one_vector_p, mask is a vector of (nelt - 1)'s already. */
12375 if (!one_vector_p)
12376 mask = aarch64_simd_gen_const_vector_dup (vmode, nelt - 1);
12377 sel = expand_simple_binop (vmode, XOR, sel, mask,
12378 NULL, 0, OPTAB_LIB_WIDEN);
12379 }
88b08073
JG
12380 aarch64_expand_vec_perm_1 (target, op0, op1, sel);
12381}
12382
cc4d934f
JG
12383/* Recognize patterns suitable for the TRN instructions. */
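/* For example, with V4SImode inputs A = {a0,a1,a2,a3} and B = {b0,b1,b2,b3},
   TRN1 produces {a0,b0,a2,b2} (permutation {0,4,2,6}) and TRN2 produces
   {a1,b1,a3,b3} (permutation {1,5,3,7}).  */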
12384static bool
12385aarch64_evpc_trn (struct expand_vec_perm_d *d)
12386{
12387 unsigned int i, odd, mask, nelt = d->nelt;
12388 rtx out, in0, in1, x;
12389 rtx (*gen) (rtx, rtx, rtx);
ef4bddc2 12390 machine_mode vmode = d->vmode;
cc4d934f
JG
12391
12392 if (GET_MODE_UNIT_SIZE (vmode) > 8)
12393 return false;
12394
12395 /* Note that these are little-endian tests.
12396 We correct for big-endian later. */
12397 if (d->perm[0] == 0)
12398 odd = 0;
12399 else if (d->perm[0] == 1)
12400 odd = 1;
12401 else
12402 return false;
12403 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
12404
12405 for (i = 0; i < nelt; i += 2)
12406 {
12407 if (d->perm[i] != i + odd)
12408 return false;
12409 if (d->perm[i + 1] != ((i + nelt + odd) & mask))
12410 return false;
12411 }
12412
12413 /* Success! */
12414 if (d->testing_p)
12415 return true;
12416
12417 in0 = d->op0;
12418 in1 = d->op1;
12419 if (BYTES_BIG_ENDIAN)
12420 {
12421 x = in0, in0 = in1, in1 = x;
12422 odd = !odd;
12423 }
12424 out = d->target;
12425
12426 if (odd)
12427 {
12428 switch (vmode)
12429 {
12430 case V16QImode: gen = gen_aarch64_trn2v16qi; break;
12431 case V8QImode: gen = gen_aarch64_trn2v8qi; break;
12432 case V8HImode: gen = gen_aarch64_trn2v8hi; break;
12433 case V4HImode: gen = gen_aarch64_trn2v4hi; break;
12434 case V4SImode: gen = gen_aarch64_trn2v4si; break;
12435 case V2SImode: gen = gen_aarch64_trn2v2si; break;
12436 case V2DImode: gen = gen_aarch64_trn2v2di; break;
358decd5
JW
12437 case V4HFmode: gen = gen_aarch64_trn2v4hf; break;
12438 case V8HFmode: gen = gen_aarch64_trn2v8hf; break;
cc4d934f
JG
12439 case V4SFmode: gen = gen_aarch64_trn2v4sf; break;
12440 case V2SFmode: gen = gen_aarch64_trn2v2sf; break;
12441 case V2DFmode: gen = gen_aarch64_trn2v2df; break;
12442 default:
12443 return false;
12444 }
12445 }
12446 else
12447 {
12448 switch (vmode)
12449 {
12450 case V16QImode: gen = gen_aarch64_trn1v16qi; break;
12451 case V8QImode: gen = gen_aarch64_trn1v8qi; break;
12452 case V8HImode: gen = gen_aarch64_trn1v8hi; break;
12453 case V4HImode: gen = gen_aarch64_trn1v4hi; break;
12454 case V4SImode: gen = gen_aarch64_trn1v4si; break;
12455 case V2SImode: gen = gen_aarch64_trn1v2si; break;
12456 case V2DImode: gen = gen_aarch64_trn1v2di; break;
358decd5
JW
12457 case V4HFmode: gen = gen_aarch64_trn1v4hf; break;
12458 case V8HFmode: gen = gen_aarch64_trn1v8hf; break;
cc4d934f
JG
12459 case V4SFmode: gen = gen_aarch64_trn1v4sf; break;
12460 case V2SFmode: gen = gen_aarch64_trn1v2sf; break;
12461 case V2DFmode: gen = gen_aarch64_trn1v2df; break;
12462 default:
12463 return false;
12464 }
12465 }
12466
12467 emit_insn (gen (out, in0, in1));
12468 return true;
12469}
12470
12471/* Recognize patterns suitable for the UZP instructions. */
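/* For example, with V4SImode inputs A = {a0,a1,a2,a3} and B = {b0,b1,b2,b3},
   UZP1 produces {a0,a2,b0,b2} (permutation {0,2,4,6}) and UZP2 produces
   {a1,a3,b1,b3} (permutation {1,3,5,7}).  */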
12472static bool
12473aarch64_evpc_uzp (struct expand_vec_perm_d *d)
12474{
12475 unsigned int i, odd, mask, nelt = d->nelt;
12476 rtx out, in0, in1, x;
12477 rtx (*gen) (rtx, rtx, rtx);
ef4bddc2 12478 machine_mode vmode = d->vmode;
cc4d934f
JG
12479
12480 if (GET_MODE_UNIT_SIZE (vmode) > 8)
12481 return false;
12482
12483 /* Note that these are little-endian tests.
12484 We correct for big-endian later. */
12485 if (d->perm[0] == 0)
12486 odd = 0;
12487 else if (d->perm[0] == 1)
12488 odd = 1;
12489 else
12490 return false;
12491 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
12492
12493 for (i = 0; i < nelt; i++)
12494 {
12495 unsigned elt = (i * 2 + odd) & mask;
12496 if (d->perm[i] != elt)
12497 return false;
12498 }
12499
12500 /* Success! */
12501 if (d->testing_p)
12502 return true;
12503
12504 in0 = d->op0;
12505 in1 = d->op1;
12506 if (BYTES_BIG_ENDIAN)
12507 {
12508 x = in0, in0 = in1, in1 = x;
12509 odd = !odd;
12510 }
12511 out = d->target;
12512
12513 if (odd)
12514 {
12515 switch (vmode)
12516 {
12517 case V16QImode: gen = gen_aarch64_uzp2v16qi; break;
12518 case V8QImode: gen = gen_aarch64_uzp2v8qi; break;
12519 case V8HImode: gen = gen_aarch64_uzp2v8hi; break;
12520 case V4HImode: gen = gen_aarch64_uzp2v4hi; break;
12521 case V4SImode: gen = gen_aarch64_uzp2v4si; break;
12522 case V2SImode: gen = gen_aarch64_uzp2v2si; break;
12523 case V2DImode: gen = gen_aarch64_uzp2v2di; break;
358decd5
JW
12524 case V4HFmode: gen = gen_aarch64_uzp2v4hf; break;
12525 case V8HFmode: gen = gen_aarch64_uzp2v8hf; break;
cc4d934f
JG
12526 case V4SFmode: gen = gen_aarch64_uzp2v4sf; break;
12527 case V2SFmode: gen = gen_aarch64_uzp2v2sf; break;
12528 case V2DFmode: gen = gen_aarch64_uzp2v2df; break;
12529 default:
12530 return false;
12531 }
12532 }
12533 else
12534 {
12535 switch (vmode)
12536 {
12537 case V16QImode: gen = gen_aarch64_uzp1v16qi; break;
12538 case V8QImode: gen = gen_aarch64_uzp1v8qi; break;
12539 case V8HImode: gen = gen_aarch64_uzp1v8hi; break;
12540 case V4HImode: gen = gen_aarch64_uzp1v4hi; break;
12541 case V4SImode: gen = gen_aarch64_uzp1v4si; break;
12542 case V2SImode: gen = gen_aarch64_uzp1v2si; break;
12543 case V2DImode: gen = gen_aarch64_uzp1v2di; break;
358decd5
JW
12544 case V4HFmode: gen = gen_aarch64_uzp1v4hf; break;
12545 case V8HFmode: gen = gen_aarch64_uzp1v8hf; break;
cc4d934f
JG
12546 case V4SFmode: gen = gen_aarch64_uzp1v4sf; break;
12547 case V2SFmode: gen = gen_aarch64_uzp1v2sf; break;
12548 case V2DFmode: gen = gen_aarch64_uzp1v2df; break;
12549 default:
12550 return false;
12551 }
12552 }
12553
12554 emit_insn (gen (out, in0, in1));
12555 return true;
12556}
12557
12558/* Recognize patterns suitable for the ZIP instructions. */
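/* For example, with V4SImode inputs A = {a0,a1,a2,a3} and B = {b0,b1,b2,b3},
   ZIP1 interleaves the low halves, giving {a0,b0,a1,b1} (permutation
   {0,4,1,5}), and ZIP2 the high halves, giving {a2,b2,a3,b3} (permutation
   {2,6,3,7}).  */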
12559static bool
12560aarch64_evpc_zip (struct expand_vec_perm_d *d)
12561{
12562 unsigned int i, high, mask, nelt = d->nelt;
12563 rtx out, in0, in1, x;
12564 rtx (*gen) (rtx, rtx, rtx);
ef4bddc2 12565 machine_mode vmode = d->vmode;
cc4d934f
JG
12566
12567 if (GET_MODE_UNIT_SIZE (vmode) > 8)
12568 return false;
12569
12570 /* Note that these are little-endian tests.
12571 We correct for big-endian later. */
12572 high = nelt / 2;
12573 if (d->perm[0] == high)
12574 /* Do Nothing. */
12575 ;
12576 else if (d->perm[0] == 0)
12577 high = 0;
12578 else
12579 return false;
12580 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
12581
12582 for (i = 0; i < nelt / 2; i++)
12583 {
12584 unsigned elt = (i + high) & mask;
12585 if (d->perm[i * 2] != elt)
12586 return false;
12587 elt = (elt + nelt) & mask;
12588 if (d->perm[i * 2 + 1] != elt)
12589 return false;
12590 }
12591
12592 /* Success! */
12593 if (d->testing_p)
12594 return true;
12595
12596 in0 = d->op0;
12597 in1 = d->op1;
12598 if (BYTES_BIG_ENDIAN)
12599 {
12600 x = in0, in0 = in1, in1 = x;
12601 high = !high;
12602 }
12603 out = d->target;
12604
12605 if (high)
12606 {
12607 switch (vmode)
12608 {
12609 case V16QImode: gen = gen_aarch64_zip2v16qi; break;
12610 case V8QImode: gen = gen_aarch64_zip2v8qi; break;
12611 case V8HImode: gen = gen_aarch64_zip2v8hi; break;
12612 case V4HImode: gen = gen_aarch64_zip2v4hi; break;
12613 case V4SImode: gen = gen_aarch64_zip2v4si; break;
12614 case V2SImode: gen = gen_aarch64_zip2v2si; break;
12615 case V2DImode: gen = gen_aarch64_zip2v2di; break;
358decd5
JW
12616 case V4HFmode: gen = gen_aarch64_zip2v4hf; break;
12617 case V8HFmode: gen = gen_aarch64_zip2v8hf; break;
cc4d934f
JG
12618 case V4SFmode: gen = gen_aarch64_zip2v4sf; break;
12619 case V2SFmode: gen = gen_aarch64_zip2v2sf; break;
12620 case V2DFmode: gen = gen_aarch64_zip2v2df; break;
12621 default:
12622 return false;
12623 }
12624 }
12625 else
12626 {
12627 switch (vmode)
12628 {
12629 case V16QImode: gen = gen_aarch64_zip1v16qi; break;
12630 case V8QImode: gen = gen_aarch64_zip1v8qi; break;
12631 case V8HImode: gen = gen_aarch64_zip1v8hi; break;
12632 case V4HImode: gen = gen_aarch64_zip1v4hi; break;
12633 case V4SImode: gen = gen_aarch64_zip1v4si; break;
12634 case V2SImode: gen = gen_aarch64_zip1v2si; break;
12635 case V2DImode: gen = gen_aarch64_zip1v2di; break;
358decd5
JW
12636 case V4HFmode: gen = gen_aarch64_zip1v4hf; break;
12637 case V8HFmode: gen = gen_aarch64_zip1v8hf; break;
cc4d934f
JG
12638 case V4SFmode: gen = gen_aarch64_zip1v4sf; break;
12639 case V2SFmode: gen = gen_aarch64_zip1v2sf; break;
12640 case V2DFmode: gen = gen_aarch64_zip1v2df; break;
12641 default:
12642 return false;
12643 }
12644 }
12645
12646 emit_insn (gen (out, in0, in1));
12647 return true;
12648}
12649
ae0533da
AL
12650/* Recognize patterns for the EXT insn. */
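/* EXT selects a contiguous run of elements starting in the first vector and
   possibly continuing into the second; e.g. for V4SImode the permutation
   {1,2,3,4} is EXT with an element offset of 1.  */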
12651
12652static bool
12653aarch64_evpc_ext (struct expand_vec_perm_d *d)
12654{
12655 unsigned int i, nelt = d->nelt;
12656 rtx (*gen) (rtx, rtx, rtx, rtx);
12657 rtx offset;
12658
12659 unsigned int location = d->perm[0]; /* Always < nelt. */
12660
12661 /* Check if the extracted indices are increasing by one. */
12662 for (i = 1; i < nelt; i++)
12663 {
12664 unsigned int required = location + i;
12665 if (d->one_vector_p)
12666 {
12667 /* We'll pass the same vector in twice, so allow indices to wrap. */
12668 required &= (nelt - 1);
12669 }
12670 if (d->perm[i] != required)
12671 return false;
12672 }
12673
ae0533da
AL
12674 switch (d->vmode)
12675 {
12676 case V16QImode: gen = gen_aarch64_extv16qi; break;
12677 case V8QImode: gen = gen_aarch64_extv8qi; break;
12678 case V4HImode: gen = gen_aarch64_extv4hi; break;
12679 case V8HImode: gen = gen_aarch64_extv8hi; break;
12680 case V2SImode: gen = gen_aarch64_extv2si; break;
12681 case V4SImode: gen = gen_aarch64_extv4si; break;
358decd5
JW
12682 case V4HFmode: gen = gen_aarch64_extv4hf; break;
12683 case V8HFmode: gen = gen_aarch64_extv8hf; break;
ae0533da
AL
12684 case V2SFmode: gen = gen_aarch64_extv2sf; break;
12685 case V4SFmode: gen = gen_aarch64_extv4sf; break;
12686 case V2DImode: gen = gen_aarch64_extv2di; break;
12687 case V2DFmode: gen = gen_aarch64_extv2df; break;
12688 default:
12689 return false;
12690 }
12691
12692 /* Success! */
12693 if (d->testing_p)
12694 return true;
12695
b31e65bb
AL
12696 /* The case where (location == 0) is a no-op for both big- and little-endian,
12697 and is removed by the mid-end at optimization levels -O1 and higher. */
12698
12699 if (BYTES_BIG_ENDIAN && (location != 0))
ae0533da
AL
12700 {
12701 /* After setup, we want the high elements of the first vector (stored
12702 at the LSB end of the register), and the low elements of the second
12703 vector (stored at the MSB end of the register). So swap. */
cb5c6c29 12704 std::swap (d->op0, d->op1);
ae0533da
AL
12705 /* location != 0 (above), so safe to assume (nelt - location) < nelt. */
12706 location = nelt - location;
12707 }
12708
12709 offset = GEN_INT (location);
12710 emit_insn (gen (d->target, d->op0, d->op1, offset));
12711 return true;
12712}
12713
923fcec3
AL
12714/* Recognize patterns for the REV insns. */
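/* These reverse the elements within fixed-size blocks; e.g. the V8QImode
   permutation {7,6,5,4,3,2,1,0} (diff == 7) reverses all bytes within the
   64-bit vector and maps to REV64.  */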
12715
12716static bool
12717aarch64_evpc_rev (struct expand_vec_perm_d *d)
12718{
12719 unsigned int i, j, diff, nelt = d->nelt;
12720 rtx (*gen) (rtx, rtx);
12721
12722 if (!d->one_vector_p)
12723 return false;
12724
12725 diff = d->perm[0];
12726 switch (diff)
12727 {
12728 case 7:
12729 switch (d->vmode)
12730 {
12731 case V16QImode: gen = gen_aarch64_rev64v16qi; break;
12732 case V8QImode: gen = gen_aarch64_rev64v8qi; break;
12733 default:
12734 return false;
12735 }
12736 break;
12737 case 3:
12738 switch (d->vmode)
12739 {
12740 case V16QImode: gen = gen_aarch64_rev32v16qi; break;
12741 case V8QImode: gen = gen_aarch64_rev32v8qi; break;
12742 case V8HImode: gen = gen_aarch64_rev64v8hi; break;
12743 case V4HImode: gen = gen_aarch64_rev64v4hi; break;
12744 default:
12745 return false;
12746 }
12747 break;
12748 case 1:
12749 switch (d->vmode)
12750 {
12751 case V16QImode: gen = gen_aarch64_rev16v16qi; break;
12752 case V8QImode: gen = gen_aarch64_rev16v8qi; break;
12753 case V8HImode: gen = gen_aarch64_rev32v8hi; break;
12754 case V4HImode: gen = gen_aarch64_rev32v4hi; break;
12755 case V4SImode: gen = gen_aarch64_rev64v4si; break;
12756 case V2SImode: gen = gen_aarch64_rev64v2si; break;
12757 case V4SFmode: gen = gen_aarch64_rev64v4sf; break;
12758 case V2SFmode: gen = gen_aarch64_rev64v2sf; break;
358decd5
JW
12759 case V8HFmode: gen = gen_aarch64_rev64v8hf; break;
12760 case V4HFmode: gen = gen_aarch64_rev64v4hf; break;
923fcec3
AL
12761 default:
12762 return false;
12763 }
12764 break;
12765 default:
12766 return false;
12767 }
12768
12769 for (i = 0; i < nelt ; i += diff + 1)
12770 for (j = 0; j <= diff; j += 1)
12771 {
12772 /* This is guaranteed to be true as the value of diff
 12773 is 7, 3 or 1 and we should have enough elements in the
12774 queue to generate this. Getting a vector mask with a
12775 value of diff other than these values implies that
12776 something is wrong by the time we get here. */
12777 gcc_assert (i + j < nelt);
12778 if (d->perm[i + j] != i + diff - j)
12779 return false;
12780 }
12781
12782 /* Success! */
12783 if (d->testing_p)
12784 return true;
12785
12786 emit_insn (gen (d->target, d->op0));
12787 return true;
12788}
12789
91bd4114
JG
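/* Recognize broadcast patterns for the DUP (element) instruction, i.e.
   permutations in which every index selects the same element.  */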
12790static bool
12791aarch64_evpc_dup (struct expand_vec_perm_d *d)
12792{
12793 rtx (*gen) (rtx, rtx, rtx);
12794 rtx out = d->target;
12795 rtx in0;
ef4bddc2 12796 machine_mode vmode = d->vmode;
91bd4114
JG
12797 unsigned int i, elt, nelt = d->nelt;
12798 rtx lane;
12799
91bd4114
JG
12800 elt = d->perm[0];
12801 for (i = 1; i < nelt; i++)
12802 {
12803 if (elt != d->perm[i])
12804 return false;
12805 }
12806
12807 /* The generic preparation in aarch64_expand_vec_perm_const_1
12808 swaps the operand order and the permute indices if it finds
12809 d->perm[0] to be in the second operand. Thus, we can always
12810 use d->op0 and need not do any extra arithmetic to get the
12811 correct lane number. */
12812 in0 = d->op0;
f901401e 12813 lane = GEN_INT (elt); /* The pattern corrects for big-endian. */
91bd4114
JG
12814
12815 switch (vmode)
12816 {
12817 case V16QImode: gen = gen_aarch64_dup_lanev16qi; break;
12818 case V8QImode: gen = gen_aarch64_dup_lanev8qi; break;
12819 case V8HImode: gen = gen_aarch64_dup_lanev8hi; break;
12820 case V4HImode: gen = gen_aarch64_dup_lanev4hi; break;
12821 case V4SImode: gen = gen_aarch64_dup_lanev4si; break;
12822 case V2SImode: gen = gen_aarch64_dup_lanev2si; break;
12823 case V2DImode: gen = gen_aarch64_dup_lanev2di; break;
862abc04
AL
12824 case V8HFmode: gen = gen_aarch64_dup_lanev8hf; break;
12825 case V4HFmode: gen = gen_aarch64_dup_lanev4hf; break;
91bd4114
JG
12826 case V4SFmode: gen = gen_aarch64_dup_lanev4sf; break;
12827 case V2SFmode: gen = gen_aarch64_dup_lanev2sf; break;
12828 case V2DFmode: gen = gen_aarch64_dup_lanev2df; break;
12829 default:
12830 return false;
12831 }
12832
12833 emit_insn (gen (out, in0, lane));
12834 return true;
12835}
12836
88b08073
JG
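/* Fall back to a TBL instruction, driven by a constant selector vector that
   is forced into a register.  */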
12837static bool
12838aarch64_evpc_tbl (struct expand_vec_perm_d *d)
12839{
12840 rtx rperm[MAX_VECT_LEN], sel;
ef4bddc2 12841 machine_mode vmode = d->vmode;
88b08073
JG
12842 unsigned int i, nelt = d->nelt;
12843
88b08073
JG
12844 if (d->testing_p)
12845 return true;
12846
 12847 /* Generic code will try constant permutation twice: once with the
 12848 original mode and again with the elements lowered to QImode.
12849 So wait and don't do the selector expansion ourselves. */
12850 if (vmode != V8QImode && vmode != V16QImode)
12851 return false;
12852
12853 for (i = 0; i < nelt; ++i)
bbcc9c00
TB
12854 {
12855 int nunits = GET_MODE_NUNITS (vmode);
12856
12857 /* If big-endian and two vectors we end up with a weird mixed-endian
12858 mode on NEON. Reverse the index within each word but not the word
12859 itself. */
12860 rperm[i] = GEN_INT (BYTES_BIG_ENDIAN ? d->perm[i] ^ (nunits - 1)
12861 : d->perm[i]);
12862 }
88b08073
JG
12863 sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
12864 sel = force_reg (vmode, sel);
12865
12866 aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
12867 return true;
12868}
12869
12870static bool
12871aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
12872{
12873 /* The pattern matching functions above are written to look for a small
12874 number to begin the sequence (0, 1, N/2). If we begin with an index
12875 from the second operand, we can swap the operands. */
12876 if (d->perm[0] >= d->nelt)
12877 {
12878 unsigned i, nelt = d->nelt;
88b08073 12879
0696116a 12880 gcc_assert (nelt == (nelt & -nelt));
88b08073 12881 for (i = 0; i < nelt; ++i)
0696116a 12882 d->perm[i] ^= nelt; /* Keep the same index, but in the other vector. */
88b08073 12883
cb5c6c29 12884 std::swap (d->op0, d->op1);
88b08073
JG
12885 }
12886
12887 if (TARGET_SIMD)
cc4d934f 12888 {
923fcec3
AL
12889 if (aarch64_evpc_rev (d))
12890 return true;
12891 else if (aarch64_evpc_ext (d))
ae0533da 12892 return true;
f901401e
AL
12893 else if (aarch64_evpc_dup (d))
12894 return true;
ae0533da 12895 else if (aarch64_evpc_zip (d))
cc4d934f
JG
12896 return true;
12897 else if (aarch64_evpc_uzp (d))
12898 return true;
12899 else if (aarch64_evpc_trn (d))
12900 return true;
12901 return aarch64_evpc_tbl (d);
12902 }
88b08073
JG
12903 return false;
12904}
12905
12906/* Expand a vec_perm_const pattern. */
12907
12908bool
12909aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
12910{
12911 struct expand_vec_perm_d d;
12912 int i, nelt, which;
12913
12914 d.target = target;
12915 d.op0 = op0;
12916 d.op1 = op1;
12917
12918 d.vmode = GET_MODE (target);
12919 gcc_assert (VECTOR_MODE_P (d.vmode));
12920 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
12921 d.testing_p = false;
12922
12923 for (i = which = 0; i < nelt; ++i)
12924 {
12925 rtx e = XVECEXP (sel, 0, i);
12926 int ei = INTVAL (e) & (2 * nelt - 1);
12927 which |= (ei < nelt ? 1 : 2);
12928 d.perm[i] = ei;
12929 }
12930
12931 switch (which)
12932 {
12933 default:
12934 gcc_unreachable ();
12935
12936 case 3:
12937 d.one_vector_p = false;
12938 if (!rtx_equal_p (op0, op1))
12939 break;
12940
12941 /* The elements of PERM do not suggest that only the first operand
12942 is used, but both operands are identical. Allow easier matching
12943 of the permutation by folding the permutation into the single
12944 input vector. */
12945 /* Fall Through. */
12946 case 2:
12947 for (i = 0; i < nelt; ++i)
12948 d.perm[i] &= nelt - 1;
12949 d.op0 = op1;
12950 d.one_vector_p = true;
12951 break;
12952
12953 case 1:
12954 d.op1 = op0;
12955 d.one_vector_p = true;
12956 break;
12957 }
12958
12959 return aarch64_expand_vec_perm_const_1 (&d);
12960}
12961
12962static bool
ef4bddc2 12963aarch64_vectorize_vec_perm_const_ok (machine_mode vmode,
88b08073
JG
12964 const unsigned char *sel)
12965{
12966 struct expand_vec_perm_d d;
12967 unsigned int i, nelt, which;
12968 bool ret;
12969
12970 d.vmode = vmode;
12971 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
12972 d.testing_p = true;
12973 memcpy (d.perm, sel, nelt);
12974
12975 /* Calculate whether all elements are in one vector. */
12976 for (i = which = 0; i < nelt; ++i)
12977 {
12978 unsigned char e = d.perm[i];
12979 gcc_assert (e < 2 * nelt);
12980 which |= (e < nelt ? 1 : 2);
12981 }
12982
12983 /* If all elements are from the second vector, reindex as if from the
12984 first vector. */
12985 if (which == 2)
12986 for (i = 0; i < nelt; ++i)
12987 d.perm[i] -= nelt;
12988
12989 /* Check whether the mask can be applied to a single vector. */
12990 d.one_vector_p = (which != 3);
12991
12992 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
12993 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
12994 if (!d.one_vector_p)
12995 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
12996
12997 start_sequence ();
12998 ret = aarch64_expand_vec_perm_const_1 (&d);
12999 end_sequence ();
13000
13001 return ret;
13002}
13003
668046d1
DS
13004rtx
13005aarch64_reverse_mask (enum machine_mode mode)
13006{
 13007 /* We have to reverse each vector because we don't have
13008 a permuted load that can reverse-load according to ABI rules. */
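  /* For example, for V4SImode (four 4-byte elements) the byte selector built
     below is { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 }, i.e. the bytes
     are reversed within each element.  */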
13009 rtx mask;
13010 rtvec v = rtvec_alloc (16);
13011 int i, j;
13012 int nunits = GET_MODE_NUNITS (mode);
13013 int usize = GET_MODE_UNIT_SIZE (mode);
13014
13015 gcc_assert (BYTES_BIG_ENDIAN);
13016 gcc_assert (AARCH64_VALID_SIMD_QREG_MODE (mode));
13017
13018 for (i = 0; i < nunits; i++)
13019 for (j = 0; j < usize; j++)
13020 RTVEC_ELT (v, i * usize + j) = GEN_INT ((i + 1) * usize - 1 - j);
13021 mask = gen_rtx_CONST_VECTOR (V16QImode, v);
13022 return force_reg (V16QImode, mask);
13023}
13024
61f17a5c
WD
13025/* Implement MODES_TIEABLE_P. In principle we should always return true.
 13026 However, due to issues with register allocation it is preferable to avoid
 13027 tying integer scalar and FP scalar modes. Executing integer operations
13028 in general registers is better than treating them as scalar vector
13029 operations. This reduces latency and avoids redundant int<->FP moves.
13030 So tie modes if they are either the same class, or vector modes with
13031 other vector modes, vector structs or any scalar mode.
13032*/
97e1ad78
JG
13033
13034bool
ef4bddc2 13035aarch64_modes_tieable_p (machine_mode mode1, machine_mode mode2)
97e1ad78
JG
13036{
13037 if (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2))
13038 return true;
13039
13040 /* We specifically want to allow elements of "structure" modes to
13041 be tieable to the structure. This more general condition allows
13042 other rarer situations too. */
61f17a5c
WD
13043 if (aarch64_vector_mode_p (mode1) && aarch64_vector_mode_p (mode2))
13044 return true;
13045
13046 /* Also allow any scalar modes with vectors. */
13047 if (aarch64_vector_mode_supported_p (mode1)
13048 || aarch64_vector_mode_supported_p (mode2))
97e1ad78
JG
13049 return true;
13050
13051 return false;
13052}
13053
e2c75eea
JG
13054/* Return a new RTX holding the result of moving POINTER forward by
13055 AMOUNT bytes. */
13056
13057static rtx
13058aarch64_move_pointer (rtx pointer, int amount)
13059{
13060 rtx next = plus_constant (Pmode, XEXP (pointer, 0), amount);
13061
13062 return adjust_automodify_address (pointer, GET_MODE (pointer),
13063 next, amount);
13064}
13065
13066/* Return a new RTX holding the result of moving POINTER forward by the
13067 size of the mode it points to. */
13068
13069static rtx
13070aarch64_progress_pointer (rtx pointer)
13071{
13072 HOST_WIDE_INT amount = GET_MODE_SIZE (GET_MODE (pointer));
13073
13074 return aarch64_move_pointer (pointer, amount);
13075}
13076
13077/* Copy one MODE sized block from SRC to DST, then progress SRC and DST by
13078 MODE bytes. */
13079
13080static void
13081aarch64_copy_one_block_and_progress_pointers (rtx *src, rtx *dst,
ef4bddc2 13082 machine_mode mode)
e2c75eea
JG
13083{
13084 rtx reg = gen_reg_rtx (mode);
13085
13086 /* "Cast" the pointers to the correct mode. */
13087 *src = adjust_address (*src, mode, 0);
13088 *dst = adjust_address (*dst, mode, 0);
13089 /* Emit the memcpy. */
13090 emit_move_insn (reg, *src);
13091 emit_move_insn (*dst, reg);
13092 /* Move the pointers forward. */
13093 *src = aarch64_progress_pointer (*src);
13094 *dst = aarch64_progress_pointer (*dst);
13095}
13096
13097/* Expand movmem, as if from a __builtin_memcpy. Return true if
13098 we succeed, otherwise return false. */
13099
13100bool
13101aarch64_expand_movmem (rtx *operands)
13102{
13103 unsigned int n;
13104 rtx dst = operands[0];
13105 rtx src = operands[1];
13106 rtx base;
13107 bool speed_p = !optimize_function_for_size_p (cfun);
13108
13109 /* When optimizing for size, give a better estimate of the length of a
13110 memcpy call, but use the default otherwise. */
13111 unsigned int max_instructions = (speed_p ? 15 : AARCH64_CALL_RATIO) / 2;
13112
13113 /* We can't do anything smart if the amount to copy is not constant. */
13114 if (!CONST_INT_P (operands[2]))
13115 return false;
13116
13117 n = UINTVAL (operands[2]);
13118
13119 /* Try to keep the number of instructions low. For cases below 16 bytes we
13120 need to make at most two moves. For cases above 16 bytes it will be one
 13121 move for each 16-byte chunk, then at most two additional moves.
13122 if (((n / 16) + (n % 16 ? 2 : 0)) > max_instructions)
13123 return false;
13124
13125 base = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13126 dst = adjust_automodify_address (dst, VOIDmode, base, 0);
13127
13128 base = copy_to_mode_reg (Pmode, XEXP (src, 0));
13129 src = adjust_automodify_address (src, VOIDmode, base, 0);
13130
13131 /* Simple cases. Copy 0-3 bytes, as (if applicable) a 2-byte, then a
13132 1-byte chunk. */
13133 if (n < 4)
13134 {
13135 if (n >= 2)
13136 {
13137 aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
13138 n -= 2;
13139 }
13140
13141 if (n == 1)
13142 aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);
13143
13144 return true;
13145 }
13146
13147 /* Copy 4-8 bytes. First a 4-byte chunk, then (if applicable) a second
13148 4-byte chunk, partially overlapping with the previously copied chunk. */
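  /* For example, a 7-byte copy is emitted as a 4-byte copy at offset 0
     followed by a 4-byte copy at offset 3; the two copies overlap by one
     byte but together cover all 7 bytes.  */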
13149 if (n < 8)
13150 {
13151 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
13152 n -= 4;
13153 if (n > 0)
13154 {
13155 int move = n - 4;
13156
13157 src = aarch64_move_pointer (src, move);
13158 dst = aarch64_move_pointer (dst, move);
13159 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
13160 }
13161 return true;
13162 }
13163
13164 /* Copy more than 8 bytes. Copy chunks of 16 bytes until we run out of
13165 them, then (if applicable) an 8-byte chunk. */
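  /* For example, a 25-byte copy is emitted as one 16-byte (TImode) chunk,
     one 8-byte (DImode) chunk and a final 1-byte copy.  */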
13166 while (n >= 8)
13167 {
13168 if (n / 16)
13169 {
13170 aarch64_copy_one_block_and_progress_pointers (&src, &dst, TImode);
13171 n -= 16;
13172 }
13173 else
13174 {
13175 aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
13176 n -= 8;
13177 }
13178 }
13179
13180 /* Finish the final bytes of the copy. We can always do this in one
13181 instruction. We either copy the exact amount we need, or partially
 13182 overlap with the previous chunk we copied and copy 8 bytes. */
13183 if (n == 0)
13184 return true;
13185 else if (n == 1)
13186 aarch64_copy_one_block_and_progress_pointers (&src, &dst, QImode);
13187 else if (n == 2)
13188 aarch64_copy_one_block_and_progress_pointers (&src, &dst, HImode);
13189 else if (n == 4)
13190 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
13191 else
13192 {
13193 if (n == 3)
13194 {
13195 src = aarch64_move_pointer (src, -1);
13196 dst = aarch64_move_pointer (dst, -1);
13197 aarch64_copy_one_block_and_progress_pointers (&src, &dst, SImode);
13198 }
13199 else
13200 {
13201 int move = n - 8;
13202
13203 src = aarch64_move_pointer (src, move);
13204 dst = aarch64_move_pointer (dst, move);
13205 aarch64_copy_one_block_and_progress_pointers (&src, &dst, DImode);
13206 }
13207 }
13208
13209 return true;
13210}
13211
a3125fc2
CL
13212/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
13213
13214static unsigned HOST_WIDE_INT
13215aarch64_asan_shadow_offset (void)
13216{
13217 return (HOST_WIDE_INT_1 << 36);
13218}
13219
d3006da6 13220static bool
445d7826 13221aarch64_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
d3006da6
JG
13222 unsigned int align,
13223 enum by_pieces_operation op,
13224 bool speed_p)
13225{
13226 /* STORE_BY_PIECES can be used when copying a constant string, but
13227 in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
13228 For now we always fail this and let the move_by_pieces code copy
13229 the string from read-only memory. */
13230 if (op == STORE_BY_PIECES)
13231 return false;
13232
13233 return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
13234}
13235
5f3bc026 13236static rtx
cb4347e8 13237aarch64_gen_ccmp_first (rtx_insn **prep_seq, rtx_insn **gen_seq,
5f3bc026
ZC
13238 int code, tree treeop0, tree treeop1)
13239{
c8012fbc
WD
13240 machine_mode op_mode, cmp_mode, cc_mode = CCmode;
13241 rtx op0, op1;
5f3bc026 13242 int unsignedp = TYPE_UNSIGNED (TREE_TYPE (treeop0));
c8012fbc 13243 insn_code icode;
5f3bc026
ZC
13244 struct expand_operand ops[4];
13245
5f3bc026
ZC
13246 start_sequence ();
13247 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
13248
13249 op_mode = GET_MODE (op0);
13250 if (op_mode == VOIDmode)
13251 op_mode = GET_MODE (op1);
13252
13253 switch (op_mode)
13254 {
13255 case QImode:
13256 case HImode:
13257 case SImode:
13258 cmp_mode = SImode;
13259 icode = CODE_FOR_cmpsi;
13260 break;
13261
13262 case DImode:
13263 cmp_mode = DImode;
13264 icode = CODE_FOR_cmpdi;
13265 break;
13266
786e3c06
WD
13267 case SFmode:
13268 cmp_mode = SFmode;
13269 cc_mode = aarch64_select_cc_mode ((rtx_code) code, op0, op1);
13270 icode = cc_mode == CCFPEmode ? CODE_FOR_fcmpesf : CODE_FOR_fcmpsf;
13271 break;
13272
13273 case DFmode:
13274 cmp_mode = DFmode;
13275 cc_mode = aarch64_select_cc_mode ((rtx_code) code, op0, op1);
13276 icode = cc_mode == CCFPEmode ? CODE_FOR_fcmpedf : CODE_FOR_fcmpdf;
13277 break;
13278
5f3bc026
ZC
13279 default:
13280 end_sequence ();
13281 return NULL_RTX;
13282 }
13283
c8012fbc
WD
13284 op0 = prepare_operand (icode, op0, 0, op_mode, cmp_mode, unsignedp);
13285 op1 = prepare_operand (icode, op1, 1, op_mode, cmp_mode, unsignedp);
5f3bc026
ZC
13286 if (!op0 || !op1)
13287 {
13288 end_sequence ();
13289 return NULL_RTX;
13290 }
13291 *prep_seq = get_insns ();
13292 end_sequence ();
13293
c8012fbc
WD
13294 create_fixed_operand (&ops[0], op0);
13295 create_fixed_operand (&ops[1], op1);
5f3bc026
ZC
13296
13297 start_sequence ();
c8012fbc 13298 if (!maybe_expand_insn (icode, 2, ops))
5f3bc026
ZC
13299 {
13300 end_sequence ();
13301 return NULL_RTX;
13302 }
13303 *gen_seq = get_insns ();
13304 end_sequence ();
13305
c8012fbc
WD
13306 return gen_rtx_fmt_ee ((rtx_code) code, cc_mode,
13307 gen_rtx_REG (cc_mode, CC_REGNUM), const0_rtx);
5f3bc026
ZC
13308}
13309
13310static rtx
cb4347e8
TS
13311aarch64_gen_ccmp_next (rtx_insn **prep_seq, rtx_insn **gen_seq, rtx prev,
13312 int cmp_code, tree treeop0, tree treeop1, int bit_code)
5f3bc026 13313{
c8012fbc
WD
13314 rtx op0, op1, target;
13315 machine_mode op_mode, cmp_mode, cc_mode = CCmode;
5f3bc026 13316 int unsignedp = TYPE_UNSIGNED (TREE_TYPE (treeop0));
c8012fbc 13317 insn_code icode;
5f3bc026 13318 struct expand_operand ops[6];
c8012fbc 13319 int aarch64_cond;
5f3bc026 13320
cb4347e8 13321 push_to_sequence (*prep_seq);
5f3bc026
ZC
13322 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
13323
13324 op_mode = GET_MODE (op0);
13325 if (op_mode == VOIDmode)
13326 op_mode = GET_MODE (op1);
13327
13328 switch (op_mode)
13329 {
13330 case QImode:
13331 case HImode:
13332 case SImode:
13333 cmp_mode = SImode;
c8012fbc 13334 icode = CODE_FOR_ccmpsi;
5f3bc026
ZC
13335 break;
13336
13337 case DImode:
13338 cmp_mode = DImode;
c8012fbc 13339 icode = CODE_FOR_ccmpdi;
5f3bc026
ZC
13340 break;
13341
786e3c06
WD
13342 case SFmode:
13343 cmp_mode = SFmode;
13344 cc_mode = aarch64_select_cc_mode ((rtx_code) cmp_code, op0, op1);
13345 icode = cc_mode == CCFPEmode ? CODE_FOR_fccmpesf : CODE_FOR_fccmpsf;
13346 break;
13347
13348 case DFmode:
13349 cmp_mode = DFmode;
13350 cc_mode = aarch64_select_cc_mode ((rtx_code) cmp_code, op0, op1);
13351 icode = cc_mode == CCFPEmode ? CODE_FOR_fccmpedf : CODE_FOR_fccmpdf;
13352 break;
13353
5f3bc026
ZC
13354 default:
13355 end_sequence ();
13356 return NULL_RTX;
13357 }
13358
13359 op0 = prepare_operand (icode, op0, 2, op_mode, cmp_mode, unsignedp);
13360 op1 = prepare_operand (icode, op1, 3, op_mode, cmp_mode, unsignedp);
13361 if (!op0 || !op1)
13362 {
13363 end_sequence ();
13364 return NULL_RTX;
13365 }
13366 *prep_seq = get_insns ();
13367 end_sequence ();
13368
13369 target = gen_rtx_REG (cc_mode, CC_REGNUM);
c8012fbc 13370 aarch64_cond = aarch64_get_condition_code_1 (cc_mode, (rtx_code) cmp_code);
5f3bc026 13371
c8012fbc
WD
13372 if (bit_code != AND)
13373 {
13374 prev = gen_rtx_fmt_ee (REVERSE_CONDITION (GET_CODE (prev),
13375 GET_MODE (XEXP (prev, 0))),
13376 VOIDmode, XEXP (prev, 0), const0_rtx);
13377 aarch64_cond = AARCH64_INVERSE_CONDITION_CODE (aarch64_cond);
13378 }
13379
13380 create_fixed_operand (&ops[0], XEXP (prev, 0));
5f3bc026
ZC
13381 create_fixed_operand (&ops[1], target);
13382 create_fixed_operand (&ops[2], op0);
13383 create_fixed_operand (&ops[3], op1);
c8012fbc
WD
13384 create_fixed_operand (&ops[4], prev);
13385 create_fixed_operand (&ops[5], GEN_INT (aarch64_cond));
5f3bc026 13386
cb4347e8 13387 push_to_sequence (*gen_seq);
5f3bc026
ZC
13388 if (!maybe_expand_insn (icode, 6, ops))
13389 {
13390 end_sequence ();
13391 return NULL_RTX;
13392 }
13393
13394 *gen_seq = get_insns ();
13395 end_sequence ();
13396
c8012fbc 13397 return gen_rtx_fmt_ee ((rtx_code) cmp_code, VOIDmode, target, const0_rtx);
5f3bc026
ZC
13398}
13399
13400#undef TARGET_GEN_CCMP_FIRST
13401#define TARGET_GEN_CCMP_FIRST aarch64_gen_ccmp_first
13402
13403#undef TARGET_GEN_CCMP_NEXT
13404#define TARGET_GEN_CCMP_NEXT aarch64_gen_ccmp_next
13405
6a569cdd
KT
13406/* Implement TARGET_SCHED_MACRO_FUSION_P. Return true if target supports
13407 instruction fusion of some sort. */
13408
13409static bool
13410aarch64_macro_fusion_p (void)
13411{
b175b679 13412 return aarch64_tune_params.fusible_ops != AARCH64_FUSE_NOTHING;
6a569cdd
KT
13413}
13414
13415
13416/* Implement TARGET_SCHED_MACRO_FUSION_PAIR_P. Return true if PREV and CURR
13417 should be kept together during scheduling. */
13418
13419static bool
13420aarch_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
13421{
13422 rtx set_dest;
13423 rtx prev_set = single_set (prev);
13424 rtx curr_set = single_set (curr);
 13425 /* prev and curr are simple SET insns, i.e. no flag setting or branching. */
13426 bool simple_sets_p = prev_set && curr_set && !any_condjump_p (curr);
13427
13428 if (!aarch64_macro_fusion_p ())
13429 return false;
13430
d7b03373 13431 if (simple_sets_p && aarch64_fusion_enabled_p (AARCH64_FUSE_MOV_MOVK))
6a569cdd
KT
13432 {
13433 /* We are trying to match:
13434 prev (mov) == (set (reg r0) (const_int imm16))
13435 curr (movk) == (set (zero_extract (reg r0)
13436 (const_int 16)
13437 (const_int 16))
13438 (const_int imm16_1)) */
13439
13440 set_dest = SET_DEST (curr_set);
13441
13442 if (GET_CODE (set_dest) == ZERO_EXTRACT
13443 && CONST_INT_P (SET_SRC (curr_set))
13444 && CONST_INT_P (SET_SRC (prev_set))
13445 && CONST_INT_P (XEXP (set_dest, 2))
13446 && INTVAL (XEXP (set_dest, 2)) == 16
13447 && REG_P (XEXP (set_dest, 0))
13448 && REG_P (SET_DEST (prev_set))
13449 && REGNO (XEXP (set_dest, 0)) == REGNO (SET_DEST (prev_set)))
13450 {
13451 return true;
13452 }
13453 }
13454
d7b03373 13455 if (simple_sets_p && aarch64_fusion_enabled_p (AARCH64_FUSE_ADRP_ADD))
9bbe08fe
KT
13456 {
13457
13458 /* We're trying to match:
13459 prev (adrp) == (set (reg r1)
13460 (high (symbol_ref ("SYM"))))
13461 curr (add) == (set (reg r0)
13462 (lo_sum (reg r1)
13463 (symbol_ref ("SYM"))))
13464 Note that r0 need not necessarily be the same as r1, especially
13465 during pre-regalloc scheduling. */
13466
13467 if (satisfies_constraint_Ush (SET_SRC (prev_set))
13468 && REG_P (SET_DEST (prev_set)) && REG_P (SET_DEST (curr_set)))
13469 {
13470 if (GET_CODE (SET_SRC (curr_set)) == LO_SUM
13471 && REG_P (XEXP (SET_SRC (curr_set), 0))
13472 && REGNO (XEXP (SET_SRC (curr_set), 0))
13473 == REGNO (SET_DEST (prev_set))
13474 && rtx_equal_p (XEXP (SET_SRC (prev_set), 0),
13475 XEXP (SET_SRC (curr_set), 1)))
13476 return true;
13477 }
13478 }
13479
d7b03373 13480 if (simple_sets_p && aarch64_fusion_enabled_p (AARCH64_FUSE_MOVK_MOVK))
cd0cb232
KT
13481 {
13482
13483 /* We're trying to match:
13484 prev (movk) == (set (zero_extract (reg r0)
13485 (const_int 16)
13486 (const_int 32))
13487 (const_int imm16_1))
13488 curr (movk) == (set (zero_extract (reg r0)
13489 (const_int 16)
13490 (const_int 48))
13491 (const_int imm16_2)) */
13492
13493 if (GET_CODE (SET_DEST (prev_set)) == ZERO_EXTRACT
13494 && GET_CODE (SET_DEST (curr_set)) == ZERO_EXTRACT
13495 && REG_P (XEXP (SET_DEST (prev_set), 0))
13496 && REG_P (XEXP (SET_DEST (curr_set), 0))
13497 && REGNO (XEXP (SET_DEST (prev_set), 0))
13498 == REGNO (XEXP (SET_DEST (curr_set), 0))
13499 && CONST_INT_P (XEXP (SET_DEST (prev_set), 2))
13500 && CONST_INT_P (XEXP (SET_DEST (curr_set), 2))
13501 && INTVAL (XEXP (SET_DEST (prev_set), 2)) == 32
13502 && INTVAL (XEXP (SET_DEST (curr_set), 2)) == 48
13503 && CONST_INT_P (SET_SRC (prev_set))
13504 && CONST_INT_P (SET_SRC (curr_set)))
13505 return true;
13506
13507 }
d7b03373 13508 if (simple_sets_p && aarch64_fusion_enabled_p (AARCH64_FUSE_ADRP_LDR))
d8354ad7
KT
13509 {
13510 /* We're trying to match:
13511 prev (adrp) == (set (reg r0)
13512 (high (symbol_ref ("SYM"))))
13513 curr (ldr) == (set (reg r1)
13514 (mem (lo_sum (reg r0)
13515 (symbol_ref ("SYM")))))
13516 or
13517 curr (ldr) == (set (reg r1)
13518 (zero_extend (mem
13519 (lo_sum (reg r0)
13520 (symbol_ref ("SYM")))))) */
13521 if (satisfies_constraint_Ush (SET_SRC (prev_set))
13522 && REG_P (SET_DEST (prev_set)) && REG_P (SET_DEST (curr_set)))
13523 {
13524 rtx curr_src = SET_SRC (curr_set);
13525
13526 if (GET_CODE (curr_src) == ZERO_EXTEND)
13527 curr_src = XEXP (curr_src, 0);
13528
13529 if (MEM_P (curr_src) && GET_CODE (XEXP (curr_src, 0)) == LO_SUM
13530 && REG_P (XEXP (XEXP (curr_src, 0), 0))
13531 && REGNO (XEXP (XEXP (curr_src, 0), 0))
13532 == REGNO (SET_DEST (prev_set))
13533 && rtx_equal_p (XEXP (XEXP (curr_src, 0), 1),
13534 XEXP (SET_SRC (prev_set), 0)))
13535 return true;
13536 }
13537 }
cd0cb232 13538
d7b03373 13539 if (aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)
00a8574a
WD
13540 && aarch_crypto_can_dual_issue (prev, curr))
13541 return true;
13542
d7b03373 13543 if (aarch64_fusion_enabled_p (AARCH64_FUSE_CMP_BRANCH)
3759108f
AP
13544 && any_condjump_p (curr))
13545 {
13546 enum attr_type prev_type = get_attr_type (prev);
13547
 13548 /* FIXME: this misses some instructions that are considered simple
 13549 arithmetic instructions for ThunderX. Simple shifts are missed here. */
13550 if (prev_type == TYPE_ALUS_SREG
13551 || prev_type == TYPE_ALUS_IMM
13552 || prev_type == TYPE_LOGICS_REG
13553 || prev_type == TYPE_LOGICS_IMM)
13554 return true;
13555 }
13556
6a569cdd
KT
13557 return false;
13558}
13559
f2879a90
KT
13560/* Return true iff the instruction fusion described by OP is enabled. */
13561
13562bool
13563aarch64_fusion_enabled_p (enum aarch64_fusion_pairs op)
13564{
13565 return (aarch64_tune_params.fusible_ops & op) != 0;
13566}
13567
350013bc
BC
 13568/* If MEM is in the form of [base+offset], extract the two parts of the
 13569 address and store them in BASE and OFFSET; otherwise return false
 13570 after clearing BASE and OFFSET. */
13571
13572bool
13573extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
13574{
13575 rtx addr;
13576
13577 gcc_assert (MEM_P (mem));
13578
13579 addr = XEXP (mem, 0);
13580
13581 if (REG_P (addr))
13582 {
13583 *base = addr;
13584 *offset = const0_rtx;
13585 return true;
13586 }
13587
13588 if (GET_CODE (addr) == PLUS
13589 && REG_P (XEXP (addr, 0)) && CONST_INT_P (XEXP (addr, 1)))
13590 {
13591 *base = XEXP (addr, 0);
13592 *offset = XEXP (addr, 1);
13593 return true;
13594 }
13595
13596 *base = NULL_RTX;
13597 *offset = NULL_RTX;
13598
13599 return false;
13600}
13601
13602/* Types for scheduling fusion. */
13603enum sched_fusion_type
13604{
13605 SCHED_FUSION_NONE = 0,
13606 SCHED_FUSION_LD_SIGN_EXTEND,
13607 SCHED_FUSION_LD_ZERO_EXTEND,
13608 SCHED_FUSION_LD,
13609 SCHED_FUSION_ST,
13610 SCHED_FUSION_NUM
13611};
13612
 13613/* If INSN is a load or store of an address in the form of [base+offset],
 13614 extract the two parts and store them in BASE and OFFSET. Return the
 13615 scheduling fusion type of this INSN. */
13616
13617static enum sched_fusion_type
13618fusion_load_store (rtx_insn *insn, rtx *base, rtx *offset)
13619{
13620 rtx x, dest, src;
13621 enum sched_fusion_type fusion = SCHED_FUSION_LD;
13622
13623 gcc_assert (INSN_P (insn));
13624 x = PATTERN (insn);
13625 if (GET_CODE (x) != SET)
13626 return SCHED_FUSION_NONE;
13627
13628 src = SET_SRC (x);
13629 dest = SET_DEST (x);
13630
abc52318
KT
13631 machine_mode dest_mode = GET_MODE (dest);
13632
13633 if (!aarch64_mode_valid_for_sched_fusion_p (dest_mode))
350013bc
BC
13634 return SCHED_FUSION_NONE;
13635
13636 if (GET_CODE (src) == SIGN_EXTEND)
13637 {
13638 fusion = SCHED_FUSION_LD_SIGN_EXTEND;
13639 src = XEXP (src, 0);
13640 if (GET_CODE (src) != MEM || GET_MODE (src) != SImode)
13641 return SCHED_FUSION_NONE;
13642 }
13643 else if (GET_CODE (src) == ZERO_EXTEND)
13644 {
13645 fusion = SCHED_FUSION_LD_ZERO_EXTEND;
13646 src = XEXP (src, 0);
13647 if (GET_CODE (src) != MEM || GET_MODE (src) != SImode)
13648 return SCHED_FUSION_NONE;
13649 }
13650
13651 if (GET_CODE (src) == MEM && REG_P (dest))
13652 extract_base_offset_in_addr (src, base, offset);
13653 else if (GET_CODE (dest) == MEM && (REG_P (src) || src == const0_rtx))
13654 {
13655 fusion = SCHED_FUSION_ST;
13656 extract_base_offset_in_addr (dest, base, offset);
13657 }
13658 else
13659 return SCHED_FUSION_NONE;
13660
13661 if (*base == NULL_RTX || *offset == NULL_RTX)
13662 fusion = SCHED_FUSION_NONE;
13663
13664 return fusion;
13665}
13666
13667/* Implement the TARGET_SCHED_FUSION_PRIORITY hook.
13668
 13669 Currently we only support fusing ldr or str instructions, so FUSION_PRI
 13670 and PRI are only calculated for these instructions. For other instructions,
 13671 FUSION_PRI and PRI are simply set to MAX_PRI - 1. In the future, other
 13672 types of instruction fusion can be added by returning different priorities.
13673
13674 It's important that irrelevant instructions get the largest FUSION_PRI. */
13675
13676static void
13677aarch64_sched_fusion_priority (rtx_insn *insn, int max_pri,
13678 int *fusion_pri, int *pri)
13679{
13680 int tmp, off_val;
13681 rtx base, offset;
13682 enum sched_fusion_type fusion;
13683
13684 gcc_assert (INSN_P (insn));
13685
13686 tmp = max_pri - 1;
13687 fusion = fusion_load_store (insn, &base, &offset);
13688 if (fusion == SCHED_FUSION_NONE)
13689 {
13690 *pri = tmp;
13691 *fusion_pri = tmp;
13692 return;
13693 }
13694
13695 /* Set FUSION_PRI according to fusion type and base register. */
13696 *fusion_pri = tmp - fusion * FIRST_PSEUDO_REGISTER - REGNO (base);
13697
13698 /* Calculate PRI. */
13699 tmp /= 2;
13700
13701 /* INSN with smaller offset goes first. */
13702 off_val = (int)(INTVAL (offset));
13703 if (off_val >= 0)
13704 tmp -= (off_val & 0xfffff);
13705 else
13706 tmp += ((- off_val) & 0xfffff);
13707
13708 *pri = tmp;
13709 return;
13710}
13711
13712/* Given OPERANDS of consecutive load/store, check if we can merge
13713 them into ldp/stp. LOAD is true if they are load instructions.
13714 MODE is the mode of memory operands. */
13715
13716bool
13717aarch64_operands_ok_for_ldpstp (rtx *operands, bool load,
13718 enum machine_mode mode)
13719{
13720 HOST_WIDE_INT offval_1, offval_2, msize;
13721 enum reg_class rclass_1, rclass_2;
13722 rtx mem_1, mem_2, reg_1, reg_2, base_1, base_2, offset_1, offset_2;
13723
13724 if (load)
13725 {
13726 mem_1 = operands[1];
13727 mem_2 = operands[3];
13728 reg_1 = operands[0];
13729 reg_2 = operands[2];
13730 gcc_assert (REG_P (reg_1) && REG_P (reg_2));
13731 if (REGNO (reg_1) == REGNO (reg_2))
13732 return false;
13733 }
13734 else
13735 {
13736 mem_1 = operands[0];
13737 mem_2 = operands[2];
13738 reg_1 = operands[1];
13739 reg_2 = operands[3];
13740 }
13741
bf84ac44
AP
13742 /* The mems cannot be volatile. */
13743 if (MEM_VOLATILE_P (mem_1) || MEM_VOLATILE_P (mem_2))
13744 return false;
13745
54700e2e
AP
13746 /* If we have SImode and slow unaligned ldp,
 13747 check that the alignment is at least 8 bytes. */
13748 if (mode == SImode
13749 && (aarch64_tune_params.extra_tuning_flags
13750 & AARCH64_EXTRA_TUNE_SLOW_UNALIGNED_LDPW)
13751 && !optimize_size
13752 && MEM_ALIGN (mem_1) < 8 * BITS_PER_UNIT)
13753 return false;
13754
350013bc
BC
13755 /* Check if the addresses are in the form of [base+offset]. */
13756 extract_base_offset_in_addr (mem_1, &base_1, &offset_1);
13757 if (base_1 == NULL_RTX || offset_1 == NULL_RTX)
13758 return false;
13759 extract_base_offset_in_addr (mem_2, &base_2, &offset_2);
13760 if (base_2 == NULL_RTX || offset_2 == NULL_RTX)
13761 return false;
13762
 13763 /* Check if the bases are the same. */
13764 if (!rtx_equal_p (base_1, base_2))
13765 return false;
13766
13767 offval_1 = INTVAL (offset_1);
13768 offval_2 = INTVAL (offset_2);
13769 msize = GET_MODE_SIZE (mode);
13770 /* Check if the offsets are consecutive. */
13771 if (offval_1 != (offval_2 + msize) && offval_2 != (offval_1 + msize))
13772 return false;
13773
13774 /* Check if the addresses are clobbered by load. */
13775 if (load)
13776 {
13777 if (reg_mentioned_p (reg_1, mem_1))
13778 return false;
13779
13780 /* In increasing order, the last load can clobber the address. */
13781 if (offval_1 > offval_2 && reg_mentioned_p (reg_2, mem_2))
13782 return false;
13783 }
13784
13785 if (REG_P (reg_1) && FP_REGNUM_P (REGNO (reg_1)))
13786 rclass_1 = FP_REGS;
13787 else
13788 rclass_1 = GENERAL_REGS;
13789
13790 if (REG_P (reg_2) && FP_REGNUM_P (REGNO (reg_2)))
13791 rclass_2 = FP_REGS;
13792 else
13793 rclass_2 = GENERAL_REGS;
13794
 13795 /* Check if the registers are of the same class. */
13796 if (rclass_1 != rclass_2)
13797 return false;
13798
13799 return true;
13800}
13801
13802/* Given OPERANDS of consecutive load/store, check if we can merge
13803 them into ldp/stp by adjusting the offset. LOAD is true if they
13804 are load instructions. MODE is the mode of memory operands.
13805
 13806 Given the following consecutive stores:
13807
13808 str w1, [xb, 0x100]
13809 str w1, [xb, 0x104]
13810 str w1, [xb, 0x108]
13811 str w1, [xb, 0x10c]
13812
13813 Though the offsets are out of the range supported by stp, we can
13814 still pair them after adjusting the offset, like:
13815
13816 add scratch, xb, 0x100
13817 stp w1, w1, [scratch]
13818 stp w1, w1, [scratch, 0x8]
13819
13820 The peephole patterns detecting this opportunity should guarantee
 13821 the scratch register is available. */
13822
13823bool
13824aarch64_operands_adjust_ok_for_ldpstp (rtx *operands, bool load,
13825 enum machine_mode mode)
13826{
13827 enum reg_class rclass_1, rclass_2, rclass_3, rclass_4;
13828 HOST_WIDE_INT offval_1, offval_2, offval_3, offval_4, msize;
13829 rtx mem_1, mem_2, mem_3, mem_4, reg_1, reg_2, reg_3, reg_4;
13830 rtx base_1, base_2, base_3, base_4, offset_1, offset_2, offset_3, offset_4;
13831
13832 if (load)
13833 {
13834 reg_1 = operands[0];
13835 mem_1 = operands[1];
13836 reg_2 = operands[2];
13837 mem_2 = operands[3];
13838 reg_3 = operands[4];
13839 mem_3 = operands[5];
13840 reg_4 = operands[6];
13841 mem_4 = operands[7];
13842 gcc_assert (REG_P (reg_1) && REG_P (reg_2)
13843 && REG_P (reg_3) && REG_P (reg_4));
13844 if (REGNO (reg_1) == REGNO (reg_2) || REGNO (reg_3) == REGNO (reg_4))
13845 return false;
13846 }
13847 else
13848 {
13849 mem_1 = operands[0];
13850 reg_1 = operands[1];
13851 mem_2 = operands[2];
13852 reg_2 = operands[3];
13853 mem_3 = operands[4];
13854 reg_3 = operands[5];
13855 mem_4 = operands[6];
13856 reg_4 = operands[7];
13857 }
 13858 /* Skip if the memory operand is by itself valid for ldp/stp. */
13859 if (!MEM_P (mem_1) || aarch64_mem_pair_operand (mem_1, mode))
13860 return false;
13861
bf84ac44
AP
13862 /* The mems cannot be volatile. */
13863 if (MEM_VOLATILE_P (mem_1) || MEM_VOLATILE_P (mem_2)
 13864 || MEM_VOLATILE_P (mem_3) || MEM_VOLATILE_P (mem_4))
13865 return false;
13866
350013bc
BC
13867 /* Check if the addresses are in the form of [base+offset]. */
13868 extract_base_offset_in_addr (mem_1, &base_1, &offset_1);
13869 if (base_1 == NULL_RTX || offset_1 == NULL_RTX)
13870 return false;
13871 extract_base_offset_in_addr (mem_2, &base_2, &offset_2);
13872 if (base_2 == NULL_RTX || offset_2 == NULL_RTX)
13873 return false;
13874 extract_base_offset_in_addr (mem_3, &base_3, &offset_3);
13875 if (base_3 == NULL_RTX || offset_3 == NULL_RTX)
13876 return false;
13877 extract_base_offset_in_addr (mem_4, &base_4, &offset_4);
13878 if (base_4 == NULL_RTX || offset_4 == NULL_RTX)
13879 return false;
13880
 13881 /* Check if the bases are the same. */
13882 if (!rtx_equal_p (base_1, base_2)
13883 || !rtx_equal_p (base_2, base_3)
13884 || !rtx_equal_p (base_3, base_4))
13885 return false;
13886
13887 offval_1 = INTVAL (offset_1);
13888 offval_2 = INTVAL (offset_2);
13889 offval_3 = INTVAL (offset_3);
13890 offval_4 = INTVAL (offset_4);
13891 msize = GET_MODE_SIZE (mode);
13892 /* Check if the offsets are consecutive. */
13893 if ((offval_1 != (offval_2 + msize)
13894 || offval_1 != (offval_3 + msize * 2)
13895 || offval_1 != (offval_4 + msize * 3))
13896 && (offval_4 != (offval_3 + msize)
13897 || offval_4 != (offval_2 + msize * 2)
13898 || offval_4 != (offval_1 + msize * 3)))
13899 return false;
13900
13901 /* Check if the addresses are clobbered by load. */
13902 if (load)
13903 {
13904 if (reg_mentioned_p (reg_1, mem_1)
13905 || reg_mentioned_p (reg_2, mem_2)
13906 || reg_mentioned_p (reg_3, mem_3))
13907 return false;
13908
13909 /* In increasing order, the last load can clobber the address. */
13910 if (offval_1 > offval_2 && reg_mentioned_p (reg_4, mem_4))
13911 return false;
13912 }
13913
54700e2e
AP
13914 /* If we have SImode and slow unaligned ldp,
 13915 check that the alignment is at least 8 bytes. */
13916 if (mode == SImode
13917 && (aarch64_tune_params.extra_tuning_flags
13918 & AARCH64_EXTRA_TUNE_SLOW_UNALIGNED_LDPW)
13919 && !optimize_size
13920 && MEM_ALIGN (mem_1) < 8 * BITS_PER_UNIT)
13921 return false;
13922
350013bc
BC
13923 if (REG_P (reg_1) && FP_REGNUM_P (REGNO (reg_1)))
13924 rclass_1 = FP_REGS;
13925 else
13926 rclass_1 = GENERAL_REGS;
13927
13928 if (REG_P (reg_2) && FP_REGNUM_P (REGNO (reg_2)))
13929 rclass_2 = FP_REGS;
13930 else
13931 rclass_2 = GENERAL_REGS;
13932
13933 if (REG_P (reg_3) && FP_REGNUM_P (REGNO (reg_3)))
13934 rclass_3 = FP_REGS;
13935 else
13936 rclass_3 = GENERAL_REGS;
13937
13938 if (REG_P (reg_4) && FP_REGNUM_P (REGNO (reg_4)))
13939 rclass_4 = FP_REGS;
13940 else
13941 rclass_4 = GENERAL_REGS;
13942
 13943 /* Check if the registers are of the same class. */
13944 if (rclass_1 != rclass_2 || rclass_2 != rclass_3 || rclass_3 != rclass_4)
13945 return false;
13946
13947 return true;
13948}
13949
13950/* Given OPERANDS of consecutive load/store, this function pairs them
13951 into ldp/stp after adjusting the offset. It depends on the fact
13952 that addresses of load/store instructions are in increasing order.
13953 MODE is the mode of memory operands. CODE is the rtl operator
13954 which should be applied to all memory operands, it's SIGN_EXTEND,
13955 ZERO_EXTEND or UNKNOWN. */
13956
13957bool
13958aarch64_gen_adjusted_ldpstp (rtx *operands, bool load,
13959 enum machine_mode mode, RTX_CODE code)
13960{
13961 rtx base, offset, t1, t2;
13962 rtx mem_1, mem_2, mem_3, mem_4;
13963 HOST_WIDE_INT off_val, abs_off, adj_off, new_off, stp_off_limit, msize;
13964
13965 if (load)
13966 {
13967 mem_1 = operands[1];
13968 mem_2 = operands[3];
13969 mem_3 = operands[5];
13970 mem_4 = operands[7];
13971 }
13972 else
13973 {
13974 mem_1 = operands[0];
13975 mem_2 = operands[2];
13976 mem_3 = operands[4];
13977 mem_4 = operands[6];
13978 gcc_assert (code == UNKNOWN);
13979 }
13980
13981 extract_base_offset_in_addr (mem_1, &base, &offset);
13982 gcc_assert (base != NULL_RTX && offset != NULL_RTX);
13983
 13984 /* Adjust the offset so that it can fit in an ldp/stp instruction. */
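  /* Worked example for SImode (msize == 4): stp_off_limit is 4 * 0x40
     == 0x100, so an original offset of 0x104 is split into adj_off == 0x100
     (folded into the scratch base register) and new_off == 0x4 (used as the
     offset of the first paired access).  */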
13985 msize = GET_MODE_SIZE (mode);
13986 stp_off_limit = msize * 0x40;
13987 off_val = INTVAL (offset);
13988 abs_off = (off_val < 0) ? -off_val : off_val;
13989 new_off = abs_off % stp_off_limit;
13990 adj_off = abs_off - new_off;
13991
13992 /* Further adjust to make sure all offsets are OK. */
13993 if ((new_off + msize * 2) >= stp_off_limit)
13994 {
13995 adj_off += stp_off_limit;
13996 new_off -= stp_off_limit;
13997 }
13998
13999 /* Make sure the adjustment can be done with ADD/SUB instructions. */
14000 if (adj_off >= 0x1000)
14001 return false;
14002
14003 if (off_val < 0)
14004 {
14005 adj_off = -adj_off;
14006 new_off = -new_off;
14007 }
14008
14009 /* Create new memory references. */
14010 mem_1 = change_address (mem_1, VOIDmode,
14011 plus_constant (DImode, operands[8], new_off));
14012
14013 /* Check if the adjusted address is OK for ldp/stp. */
14014 if (!aarch64_mem_pair_operand (mem_1, mode))
14015 return false;
14016
14017 msize = GET_MODE_SIZE (mode);
14018 mem_2 = change_address (mem_2, VOIDmode,
14019 plus_constant (DImode,
14020 operands[8],
14021 new_off + msize));
14022 mem_3 = change_address (mem_3, VOIDmode,
14023 plus_constant (DImode,
14024 operands[8],
14025 new_off + msize * 2));
14026 mem_4 = change_address (mem_4, VOIDmode,
14027 plus_constant (DImode,
14028 operands[8],
14029 new_off + msize * 3));
14030
14031 if (code == ZERO_EXTEND)
14032 {
14033 mem_1 = gen_rtx_ZERO_EXTEND (DImode, mem_1);
14034 mem_2 = gen_rtx_ZERO_EXTEND (DImode, mem_2);
14035 mem_3 = gen_rtx_ZERO_EXTEND (DImode, mem_3);
14036 mem_4 = gen_rtx_ZERO_EXTEND (DImode, mem_4);
14037 }
14038 else if (code == SIGN_EXTEND)
14039 {
14040 mem_1 = gen_rtx_SIGN_EXTEND (DImode, mem_1);
14041 mem_2 = gen_rtx_SIGN_EXTEND (DImode, mem_2);
14042 mem_3 = gen_rtx_SIGN_EXTEND (DImode, mem_3);
14043 mem_4 = gen_rtx_SIGN_EXTEND (DImode, mem_4);
14044 }
14045
14046 if (load)
14047 {
14048 operands[1] = mem_1;
14049 operands[3] = mem_2;
14050 operands[5] = mem_3;
14051 operands[7] = mem_4;
14052 }
14053 else
14054 {
14055 operands[0] = mem_1;
14056 operands[2] = mem_2;
14057 operands[4] = mem_3;
14058 operands[6] = mem_4;
14059 }
14060
14061 /* Emit the adjusting instruction. */
14062 emit_insn (gen_rtx_SET (operands[8], plus_constant (DImode, base, adj_off)));
14063 /* Emit ldp/stp instructions. */
14064 t1 = gen_rtx_SET (operands[0], operands[1]);
14065 t2 = gen_rtx_SET (operands[2], operands[3]);
14066 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, t1, t2)));
14067 t1 = gen_rtx_SET (operands[4], operands[5]);
14068 t2 = gen_rtx_SET (operands[6], operands[7]);
14069 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, t1, t2)));
14070 return true;
14071}
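/* Editor's note: a standalone sketch (not part of GCC) of the offset
   splitting performed above, assuming a DImode access (msize == 8).  The
   incoming offset is split into ADJ_OFF, which is folded into the scratch
   base register with a single ADD/SUB and must therefore stay below 0x1000,
   and NEW_OFF, which remains in the ldp/stp addresses and must leave room
   for the three following slots.  */

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  long msize = 8;                   /* GET_MODE_SIZE (DImode).  */
  long limit = msize * 0x40;        /* 0x200: ldp/stp offset range.  */
  long off_val = 0xf08;             /* Example original offset.  */

  long abs_off = labs (off_val);
  long new_off = abs_off % limit;   /* 0x108, kept in the addresses.  */
  long adj_off = abs_off - new_off; /* 0xe00, folded into the base.  */

  /* Keep the slots at +msize, +2*msize and +3*msize in range too.  */
  if (new_off + msize * 2 >= limit)
    {
      adj_off += limit;
      new_off -= limit;
    }

  /* Prints adj_off=0xe00 new_off=0x108 add_ok=1: the two pairs access
     [base + 0xe00 + 0x108] up to [base + 0xe00 + 0x120].  */
  printf ("adj_off=%#lx new_off=%#lx add_ok=%d\n",
          adj_off, new_off, adj_off < 0x1000);
  return 0;
}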
14072
14073/* Return true if a pseudo register should be created and used to hold
14074 the GOT address for PIC code. */
14075
14076bool
14077aarch64_use_pseudo_pic_reg (void)
14078{
14079 return aarch64_cmodel == AARCH64_CMODEL_SMALL_SPIC;
14080}
14081
14082/* Implement TARGET_UNSPEC_MAY_TRAP_P. */
14083
14084static int
14085aarch64_unspec_may_trap_p (const_rtx x, unsigned flags)
14086{
14087 switch (XINT (x, 1))
14088 {
14089 case UNSPEC_GOTSMALLPIC:
14090 case UNSPEC_GOTSMALLPIC28K:
14091 case UNSPEC_GOTTINYPIC:
14092 return 0;
14093 default:
14094 break;
14095 }
14096
14097 return default_unspec_may_trap_p (x, flags);
14098}
14099
14100
14101/* If X is a positive CONST_DOUBLE with a value that is a power of 2,
14102 return the log2 of that value. Otherwise return -1. */
14103
14104int
14105aarch64_fpconst_pow_of_2 (rtx x)
14106{
14107 const REAL_VALUE_TYPE *r;
14108
14109 if (!CONST_DOUBLE_P (x))
14110 return -1;
14111
14112 r = CONST_DOUBLE_REAL_VALUE (x);
14113
14114 if (REAL_VALUE_NEGATIVE (*r)
14115 || REAL_VALUE_ISNAN (*r)
14116 || REAL_VALUE_ISINF (*r)
14117 || !real_isinteger (r, DFmode))
14118 return -1;
14119
14120 return exact_log2 (real_to_integer (r));
14121}
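/* Editor's note: an illustrative, standalone analogue (not part of GCC) of
   the test above, operating on a plain double rather than a CONST_DOUBLE
   rtx.  Only positive, finite, integral values that are exact powers of two
   yield a non-negative result: 8.0 -> 3, 1.0 -> 0, while 6.0, 0.5, -4.0,
   infinities and NaN all yield -1.  (Values too large for a long long are
   ignored here for simplicity.)  */

#include <math.h>

static int
fpconst_pow_of_2_sketch (double d)
{
  if (!(d > 0) || isinf (d) || d != floor (d))
    return -1;

  long long i = (long long) d;
  if ((i & (i - 1)) != 0)      /* Power of two <=> exactly one bit set.  */
    return -1;

  int log2val = 0;
  while (i > 1)
    {
      i >>= 1;
      log2val++;
    }
  return log2val;
}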
14122
14123/* If X is a vector in which every element is the same CONST_DOUBLE value
14124 Y, return the aarch64_fpconst_pow_of_2 of Y. Otherwise return -1. */
14125
14126int
14127aarch64_vec_fpconst_pow_of_2 (rtx x)
14128{
14129 if (GET_CODE (x) != CONST_VECTOR)
14130 return -1;
14131
14132 if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
14133 return -1;
14134
14135 int firstval = aarch64_fpconst_pow_of_2 (CONST_VECTOR_ELT (x, 0));
14136 if (firstval <= 0)
14137 return -1;
14138
14139 for (int i = 1; i < CONST_VECTOR_NUNITS (x); i++)
14140 if (aarch64_fpconst_pow_of_2 (CONST_VECTOR_ELT (x, i)) != firstval)
14141 return -1;
14142
14143 return firstval;
14144}
14145
14146/* Implement TARGET_PROMOTED_TYPE to promote __fp16 to float. */
14147static tree
14148aarch64_promoted_type (const_tree t)
14149{
14150 if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
14151 return float_type_node;
14152 return NULL_TREE;
14153}
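/* Editor's note: an illustrative example (not part of GCC) of what the
   promotion above means for user code on an AArch64 target that provides
   __fp16: both operands are converted to float, the arithmetic is carried
   out in single precision, and the result is narrowed back to __fp16 only
   on assignment or return.  */

#ifdef __aarch64__
static __fp16
fp16_average (__fp16 a, __fp16 b)
{
  /* Computed as (float) a + (float) b, then the division in float,
     then one conversion back to __fp16 for the return value.  */
  return (a + b) / 2;
}
#endif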
14154
14155/* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
14156
14157static bool
14158aarch64_optab_supported_p (int op, machine_mode mode1, machine_mode,
14159 optimization_type opt_type)
14160{
14161 switch (op)
14162 {
14163 case rsqrt_optab:
14164 return opt_type == OPTIMIZE_FOR_SPEED && use_rsqrt_p (mode1);
14165
14166 default:
14167 return true;
14168 }
14169}
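/* Editor's note: an illustrative sketch (not part of GCC) of why the rsqrt
   optab is only reported as supported when optimizing for speed.  The
   expansion trades one sqrt + divide for an initial estimate refined by
   Newton-Raphson steps of the form x' = x * (3 - d * x * x) / 2; this is
   usually faster on cores with slow dividers but costs several extra
   instructions, so it is not attractive when optimizing for size.  */

static double
rsqrt_newton_sketch (double d, double estimate)
{
  double x = estimate;
  for (int i = 0; i < 3; i++)
    /* Each step roughly doubles the number of correct bits, e.g.
       rsqrt_newton_sketch (2.0, 0.7) converges towards 0.7071....  */
    x = x * (3.0 - d * x * x) * 0.5;
  return x;
}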
14170
14171#undef TARGET_ADDRESS_COST
14172#define TARGET_ADDRESS_COST aarch64_address_cost
14173
14174/* This hook determines whether unnamed bitfields affect the alignment
14175 of the containing structure. The hook returns true if the structure
14176 should inherit the alignment requirements of an unnamed bitfield's
14177 type. */
14178#undef TARGET_ALIGN_ANON_BITFIELD
14179#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
14180
14181#undef TARGET_ASM_ALIGNED_DI_OP
14182#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
14183
14184#undef TARGET_ASM_ALIGNED_HI_OP
14185#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
14186
14187#undef TARGET_ASM_ALIGNED_SI_OP
14188#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
14189
14190#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14191#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
14192 hook_bool_const_tree_hwi_hwi_const_tree_true
14193
14194#undef TARGET_ASM_FILE_START
14195#define TARGET_ASM_FILE_START aarch64_start_file
14196
14197#undef TARGET_ASM_OUTPUT_MI_THUNK
14198#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
14199
14200#undef TARGET_ASM_SELECT_RTX_SECTION
14201#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
14202
14203#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
14204#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
14205
14206#undef TARGET_BUILD_BUILTIN_VA_LIST
14207#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
14208
14209#undef TARGET_CALLEE_COPIES
14210#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
14211
14212#undef TARGET_CAN_ELIMINATE
14213#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
14214
14215#undef TARGET_CAN_INLINE_P
14216#define TARGET_CAN_INLINE_P aarch64_can_inline_p
14217
14218#undef TARGET_CANNOT_FORCE_CONST_MEM
14219#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
14220
14221#undef TARGET_CASE_VALUES_THRESHOLD
14222#define TARGET_CASE_VALUES_THRESHOLD aarch64_case_values_threshold
14223
14224#undef TARGET_CONDITIONAL_REGISTER_USAGE
14225#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
14226
14227/* Only the least significant bit is used for initialization guard
14228 variables. */
14229#undef TARGET_CXX_GUARD_MASK_BIT
14230#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
14231
14232#undef TARGET_C_MODE_FOR_SUFFIX
14233#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
14234
14235#ifdef TARGET_BIG_ENDIAN_DEFAULT
14236#undef TARGET_DEFAULT_TARGET_FLAGS
14237#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
14238#endif
14239
14240#undef TARGET_CLASS_MAX_NREGS
14241#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
14242
14243#undef TARGET_BUILTIN_DECL
14244#define TARGET_BUILTIN_DECL aarch64_builtin_decl
14245
14246#undef TARGET_BUILTIN_RECIPROCAL
14247#define TARGET_BUILTIN_RECIPROCAL aarch64_builtin_reciprocal
14248
14249#undef TARGET_EXPAND_BUILTIN
14250#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
14251
14252#undef TARGET_EXPAND_BUILTIN_VA_START
14253#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
14254
14255#undef TARGET_FOLD_BUILTIN
14256#define TARGET_FOLD_BUILTIN aarch64_fold_builtin
14257
14258#undef TARGET_FUNCTION_ARG
14259#define TARGET_FUNCTION_ARG aarch64_function_arg
14260
14261#undef TARGET_FUNCTION_ARG_ADVANCE
14262#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
14263
14264#undef TARGET_FUNCTION_ARG_BOUNDARY
14265#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
14266
14267#undef TARGET_FUNCTION_OK_FOR_SIBCALL
14268#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
14269
14270#undef TARGET_FUNCTION_VALUE
14271#define TARGET_FUNCTION_VALUE aarch64_function_value
14272
14273#undef TARGET_FUNCTION_VALUE_REGNO_P
14274#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
14275
14276#undef TARGET_FRAME_POINTER_REQUIRED
14277#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
14278
14279#undef TARGET_GIMPLE_FOLD_BUILTIN
14280#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
14281
14282#undef TARGET_GIMPLIFY_VA_ARG_EXPR
14283#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
14284
14285#undef TARGET_INIT_BUILTINS
14286#define TARGET_INIT_BUILTINS aarch64_init_builtins
14287
14288#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
14289#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
14290 aarch64_ira_change_pseudo_allocno_class
14291
14292#undef TARGET_LEGITIMATE_ADDRESS_P
14293#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
14294
14295#undef TARGET_LEGITIMATE_CONSTANT_P
14296#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
14297
14298#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
14299#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT \
14300 aarch64_legitimize_address_displacement
14301
14302#undef TARGET_LIBGCC_CMP_RETURN_MODE
14303#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
14304
14305#undef TARGET_MANGLE_TYPE
14306#define TARGET_MANGLE_TYPE aarch64_mangle_type
14307
14308#undef TARGET_MEMORY_MOVE_COST
14309#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
14310
14311#undef TARGET_MIN_DIVISIONS_FOR_RECIP_MUL
14312#define TARGET_MIN_DIVISIONS_FOR_RECIP_MUL aarch64_min_divisions_for_recip_mul
14313
14314#undef TARGET_MUST_PASS_IN_STACK
14315#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
14316
14317/* This target hook should return true if accesses to volatile bitfields
14318 should use the narrowest mode possible. It should return false if these
14319 accesses should use the bitfield container type. */
14320#undef TARGET_NARROW_VOLATILE_BITFIELD
14321#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
14322
14323#undef TARGET_OPTION_OVERRIDE
14324#define TARGET_OPTION_OVERRIDE aarch64_override_options
14325
14326#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
14327#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
14328 aarch64_override_options_after_change
14329
14330#undef TARGET_OPTION_SAVE
14331#define TARGET_OPTION_SAVE aarch64_option_save
14332
14333#undef TARGET_OPTION_RESTORE
14334#define TARGET_OPTION_RESTORE aarch64_option_restore
14335
14336#undef TARGET_OPTION_PRINT
14337#define TARGET_OPTION_PRINT aarch64_option_print
14338
14339#undef TARGET_OPTION_VALID_ATTRIBUTE_P
14340#define TARGET_OPTION_VALID_ATTRIBUTE_P aarch64_option_valid_attribute_p
14341
14342#undef TARGET_SET_CURRENT_FUNCTION
14343#define TARGET_SET_CURRENT_FUNCTION aarch64_set_current_function
14344
14345#undef TARGET_PASS_BY_REFERENCE
14346#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
14347
14348#undef TARGET_PREFERRED_RELOAD_CLASS
14349#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
14350
14351#undef TARGET_SCHED_REASSOCIATION_WIDTH
14352#define TARGET_SCHED_REASSOCIATION_WIDTH aarch64_reassociation_width
14353
14354#undef TARGET_PROMOTED_TYPE
14355#define TARGET_PROMOTED_TYPE aarch64_promoted_type
14356
14357#undef TARGET_SECONDARY_RELOAD
14358#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
14359
14360#undef TARGET_SHIFT_TRUNCATION_MASK
14361#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
14362
14363#undef TARGET_SETUP_INCOMING_VARARGS
14364#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
14365
14366#undef TARGET_STRUCT_VALUE_RTX
14367#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
14368
14369#undef TARGET_REGISTER_MOVE_COST
14370#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
14371
14372#undef TARGET_RETURN_IN_MEMORY
14373#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
14374
14375#undef TARGET_RETURN_IN_MSB
14376#define TARGET_RETURN_IN_MSB aarch64_return_in_msb
14377
14378#undef TARGET_RTX_COSTS
14379#define TARGET_RTX_COSTS aarch64_rtx_costs_wrapper
14380
14381#undef TARGET_SCHED_ISSUE_RATE
14382#define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate
14383
14384#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14385#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
14386 aarch64_sched_first_cycle_multipass_dfa_lookahead
14387
14388#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
14389#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD \
14390 aarch64_first_cycle_multipass_dfa_lookahead_guard
14391
14392#undef TARGET_TRAMPOLINE_INIT
14393#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
14394
14395#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
14396#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
14397
14398#undef TARGET_VECTOR_MODE_SUPPORTED_P
14399#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
14400
14401#undef TARGET_ARRAY_MODE_SUPPORTED_P
14402#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
14403
14404#undef TARGET_VECTORIZE_ADD_STMT_COST
14405#define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost
14406
14407#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
14408#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
14409 aarch64_builtin_vectorization_cost
14410
14411#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14412#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
14413
14414#undef TARGET_VECTORIZE_BUILTINS
14415#define TARGET_VECTORIZE_BUILTINS
14416
14417#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
14418#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
14419 aarch64_builtin_vectorized_function
14420
14421#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
14422#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
14423 aarch64_autovectorize_vector_sizes
14424
14425#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14426#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV \
14427 aarch64_atomic_assign_expand_fenv
14428
14429/* Section anchor support. */
14430
14431#undef TARGET_MIN_ANCHOR_OFFSET
14432#define TARGET_MIN_ANCHOR_OFFSET -256
14433
14434/* Limit the maximum anchor offset to 4k-1, since that's the limit for a
14435 byte offset; we can do much more for larger data types, but have no way
14436 to determine the size of the access. We assume accesses are aligned. */
14437#undef TARGET_MAX_ANCHOR_OFFSET
14438#define TARGET_MAX_ANCHOR_OFFSET 4095
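/* Editor's note: an illustrative example (not part of GCC) of what these
   anchor bounds mean.  With section anchors enabled (-fsection-anchors),
   statics emitted together can be addressed from one anchor symbol, so the
   sketch below can materialize a single base with adrp/add and reach all
   three variables as [anchor, #offset], with the offsets kept inside the
   [-256, 4095] window declared here.  */

static int anchor_ex_a, anchor_ex_b, anchor_ex_c;

int
anchor_ex_sum (void)
{
  /* Three loads sharing one anchored base instead of three separate
     address computations.  */
  return anchor_ex_a + anchor_ex_b + anchor_ex_c;
}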
14439
14440#undef TARGET_VECTOR_ALIGNMENT
14441#define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment
14442
14443#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
14444#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
14445 aarch64_simd_vector_alignment_reachable
14446
14447/* vec_perm support. */
14448
14449#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
14450#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
14451 aarch64_vectorize_vec_perm_const_ok
14452
14453#undef TARGET_INIT_LIBFUNCS
14454#define TARGET_INIT_LIBFUNCS aarch64_init_libfuncs
14455
14456#undef TARGET_FIXED_CONDITION_CODE_REGS
14457#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
14458
14459#undef TARGET_FLAGS_REGNUM
14460#define TARGET_FLAGS_REGNUM CC_REGNUM
14461
14462#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
14463#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
14464
14465#undef TARGET_ASAN_SHADOW_OFFSET
14466#define TARGET_ASAN_SHADOW_OFFSET aarch64_asan_shadow_offset
14467
14468#undef TARGET_LEGITIMIZE_ADDRESS
14469#define TARGET_LEGITIMIZE_ADDRESS aarch64_legitimize_address
14470
14471#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14472#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14473 aarch64_use_by_pieces_infrastructure_p
14474
14475#undef TARGET_CAN_USE_DOLOOP_P
14476#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
14477
14478#undef TARGET_SCHED_MACRO_FUSION_P
14479#define TARGET_SCHED_MACRO_FUSION_P aarch64_macro_fusion_p
14480
14481#undef TARGET_SCHED_MACRO_FUSION_PAIR_P
14482#define TARGET_SCHED_MACRO_FUSION_PAIR_P aarch_macro_fusion_pair_p
14483
14484#undef TARGET_SCHED_FUSION_PRIORITY
14485#define TARGET_SCHED_FUSION_PRIORITY aarch64_sched_fusion_priority
14486
14487#undef TARGET_UNSPEC_MAY_TRAP_P
14488#define TARGET_UNSPEC_MAY_TRAP_P aarch64_unspec_may_trap_p
14489
14490#undef TARGET_USE_PSEUDO_PIC_REG
14491#define TARGET_USE_PSEUDO_PIC_REG aarch64_use_pseudo_pic_reg
14492
14493#undef TARGET_PRINT_OPERAND
14494#define TARGET_PRINT_OPERAND aarch64_print_operand
14495
14496#undef TARGET_PRINT_OPERAND_ADDRESS
14497#define TARGET_PRINT_OPERAND_ADDRESS aarch64_print_operand_address
14498
14499#undef TARGET_OPTAB_SUPPORTED_P
14500#define TARGET_OPTAB_SUPPORTED_P aarch64_optab_supported_p
14501
14502#undef TARGET_OMIT_STRUCT_RETURN_REG
14503#define TARGET_OMIT_STRUCT_RETURN_REG true
14504
14505struct gcc_target targetm = TARGET_INITIALIZER;
14506
14507#include "gt-aarch64.h"