;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2019 Free Software Foundation, Inc.
+;; Copyright (C) 2009-2020 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
return CONST_INT_P (op) && IN_RANGE (INTVAL (op), 1, 3);
})
;; NOTE(review): this span carries unified-diff +/- markers -- it is an
;; unapplied patch hunk, not plain source.  The change turns the former
;; special predicate (lowpart SUBREG only) into an ordinary predicate
;; that also accepts a TRUNCATE.
-(define_special_predicate "subreg_lowpart_operator"
-  (and (match_code "subreg")
-       (match_test "subreg_lowpart_p (op)")))
+(define_predicate "subreg_lowpart_operator"
+  (ior (match_code "truncate")
+       (and (match_code "subreg")
+	    (match_test "subreg_lowpart_p (op)"))))
(define_predicate "aarch64_ccmp_immediate"
(and (match_code "const_int")
(and (match_code "const_double")
(match_test "aarch64_fpconst_pow_of_2 (op) > 0")))
;; A const_double for which aarch64_fpconst_pow2_recip (helper defined
;; elsewhere) reports a value > 0 -- per the helper's name, a constant
;; whose reciprocal is a power of 2; confirm against the helper.
+(define_predicate "aarch64_fp_pow2_recip"
+  (and (match_code "const_double")
+       (match_test "aarch64_fpconst_pow2_recip (op) > 0")))
+
;; Vector float constant accepted by aarch64_vec_fpconst_pow_of_2
;; (helper defined elsewhere; any value > 0 counts as a match).
;; NOTE(review): no match_code guard here, unlike the scalar variant.
(define_predicate "aarch64_fp_vec_pow2"
  (match_test "aarch64_vec_fpconst_pow_of_2 (op) > 0"))
(and (match_operand 0 "aarch64_pluslong_immediate")
(not (match_operand 0 "aarch64_plus_immediate"))))
;; A const_poly_int usable as a scalar SVE INC/DEC-style immediate;
;; validity is delegated to aarch64_sve_scalar_inc_dec_immediate_p
;; (defined elsewhere).
+(define_predicate "aarch64_sve_scalar_inc_dec_immediate"
+  (and (match_code "const_poly_int")
+       (match_test "aarch64_sve_scalar_inc_dec_immediate_p (op)")))
+
;; A const_poly_int usable as an ADDVL/ADDPL immediate; validity is
;; delegated to aarch64_sve_addvl_addpl_immediate_p (defined elsewhere).
(define_predicate "aarch64_sve_addvl_addpl_immediate"
  (and (match_code "const_poly_int")
       (match_test "aarch64_sve_addvl_addpl_immediate_p (op)")))
;; Union predicate: any constant handled by either the scalar INC/DEC
;; immediate predicate or the ADDVL/ADDPL immediate predicate.
+(define_predicate "aarch64_sve_plus_immediate"
+  (ior (match_operand 0 "aarch64_sve_scalar_inc_dec_immediate")
+       (match_operand 0 "aarch64_sve_addvl_addpl_immediate")))
+
;; A const_poly_int offset for which aarch64_add_offset_temporaries
;; (defined elsewhere) says exactly one temporary register is needed,
;; i.e. the addition is worth splitting.
(define_predicate "aarch64_split_add_offset_immediate"
  (and (match_code "const_poly_int")
       (match_test "aarch64_add_offset_temporaries (op) == 1")))
;; NOTE(review): diff hunk.  The SVE constant alternative is both
;; widened (addvl/addpl -> the aarch64_sve_plus_immediate union) and
;; newly gated on TARGET_SVE.
(define_predicate "aarch64_pluslong_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "aarch64_pluslong_immediate")
-       (match_operand 0 "aarch64_sve_addvl_addpl_immediate")))
+       (and (match_test "TARGET_SVE")
+	    (match_operand 0 "aarch64_sve_plus_immediate"))))
(define_predicate "aarch64_pluslong_or_poly_operand"
(ior (match_operand 0 "aarch64_pluslong_operand")
return aarch64_stepped_int_parallel_p (op, -1);
})
;; A PARALLEL of integers that step upwards by 1 (step argument +1 to
;; aarch64_stepped_int_parallel_p); counterpart of the descending
;; (step -1) predicate immediately above this hunk.
+(define_predicate "ascending_int_parallel"
+  (match_code "parallel")
+{
+  return aarch64_stepped_int_parallel_p (op, 1);
+})
+
+
(define_special_predicate "aarch64_simd_lshift_imm"
(match_code "const,const_vector")
{
(match_test "op == const0_rtx")
(match_operand 0 "aarch64_simd_or_scalar_imm_zero"))))
;; Either a register or the all-ones (-1) SIMD immediate.
+(define_predicate "aarch64_simd_reg_or_minus_one"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_simd_imm_minus_one")))
+
;; A MEM valid for SIMD structure loads/stores; requires TARGET_SIMD
;; and delegates address checking to aarch64_simd_mem_operand_p.
(define_predicate "aarch64_simd_struct_operand"
  (and (match_code "mem")
       (match_test "TARGET_SIMD && aarch64_simd_mem_operand_p (op)")))
(and (match_code "mem")
(match_test "aarch64_sve_ld1rq_operand_p (op)")))
;; MEMs whose addresses are valid for the SVE first-faulting (LDFF1)
;; and non-faulting (LDNF1) loads respectively; address validity is
;; delegated to the matching *_operand_p helpers (defined elsewhere).
+(define_predicate "aarch64_sve_ldff1_operand"
+  (and (match_code "mem")
+       (match_test "aarch64_sve_ldff1_operand_p (op)")))
+
+(define_predicate "aarch64_sve_ldnf1_operand"
+  (and (match_code "mem")
+       (match_test "aarch64_sve_ldnf1_operand_p (op)")))
+
;; Like memory_operand, but restricted to addresses that are valid for
;; SVE LDR and STR instructions.
;; Address validity is delegated to aarch64_sve_ldr_operand_p
;; (defined elsewhere).
(define_predicate "aarch64_sve_ldr_operand"
  (and (match_code "mem")
       (match_test "aarch64_sve_ldr_operand_p (op)")))
;; A *special* predicate (no automatic mode check), so the expected
;; mode is passed to the helper explicitly.  Accepts a REG or PLUS that
;; aarch64_sve_prefetch_operand_p (defined elsewhere) approves.
+(define_special_predicate "aarch64_sve_prefetch_operand"
+  (and (match_code "reg, plus")
+       (match_test "aarch64_sve_prefetch_operand_p (op, mode)")))
+
;; A register, or a memory reference valid for SVE LDR/STR.
(define_predicate "aarch64_sve_nonimmediate_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "aarch64_sve_ldr_operand")))
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_sve_ld1r_operand")))
;; A CONST wrapper that aarch64_sve_ptrue_svpattern_p (defined
;; elsewhere) recognizes as a PTRUE with some svpattern; the NULL
;; argument means the pattern details are not requested here.
+(define_predicate "aarch64_sve_ptrue_svpattern_immediate"
+  (and (match_code "const")
+       (match_test "aarch64_sve_ptrue_svpattern_p (op, NULL)")))
+
;; Constant vector usable as an SVE arithmetic immediate; the "false"
;; argument selects the non-negated form (compare the negated variant
;; defined nearby, which passes true).
(define_predicate "aarch64_sve_arith_immediate"
  (and (match_code "const,const_vector")
       (match_test "aarch64_sve_arith_immediate_p (op, false)")))
(and (match_code "const,const_vector")
(match_test "aarch64_sve_arith_immediate_p (op, true)")))
;; NOTE(review): diff hunk.  It (a) inserts the two saturating
;; add/subtract immediate predicates (the bool argument selects the
;; subtract form) and (b) renames "aarch64_sve_inc_dec_immediate" to
;; "aarch64_sve_vector_inc_dec_immediate", predicate and helper alike.
-(define_predicate "aarch64_sve_inc_dec_immediate"
+(define_predicate "aarch64_sve_qadd_immediate"
+  (and (match_code "const,const_vector")
+       (match_test "aarch64_sve_sqadd_sqsub_immediate_p (op, false)")))
+
+(define_predicate "aarch64_sve_qsub_immediate"
+  (and (match_code "const,const_vector")
+       (match_test "aarch64_sve_sqadd_sqsub_immediate_p (op, true)")))
+
+(define_predicate "aarch64_sve_vector_inc_dec_immediate"
  (and (match_code "const,const_vector")
-       (match_test "aarch64_sve_inc_dec_immediate_p (op)")))
+       (match_test "aarch64_sve_vector_inc_dec_immediate_p (op)")))
+
;; Gather/scatter immediate offsets, one predicate per element size.
;; Each accepts a const_int that is a non-negative multiple of the
;; element size, giving 32 encodable values per size:
;;   b: [0, 31] step 1;  h: [0, 62] step 2;
;;   w: [0, 124] step 4; d: [0, 248] step 8.
+(define_predicate "aarch64_sve_gather_immediate_b"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+
+(define_predicate "aarch64_sve_gather_immediate_h"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 62)")
+       (match_test "(INTVAL (op) & 1) == 0")))
+
+(define_predicate "aarch64_sve_gather_immediate_w"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 124)")
+       (match_test "(INTVAL (op) & 3) == 0")))
+
+(define_predicate "aarch64_sve_gather_immediate_d"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 248)")
+       (match_test "(INTVAL (op) & 7) == 0")))
(define_predicate "aarch64_sve_uxtb_immediate"
(and (match_code "const_vector")
(match_test "aarch64_float_const_representable_p (op)"))))
;; NOTE(review): diff hunk.  The match_code is tightened from
;; "const,const_vector" to "const_int,const_vector" in both compare
;; predicates; "true"/"false" select signed vs. the other compare form
;; in aarch64_sve_cmp_immediate_p (helper defined elsewhere).
(define_predicate "aarch64_sve_cmp_vsc_immediate"
-  (and (match_code "const,const_vector")
+  (and (match_code "const_int,const_vector")
       (match_test "aarch64_sve_cmp_immediate_p (op, true)")))
(define_predicate "aarch64_sve_cmp_vsd_immediate"
-  (and (match_code "const,const_vector")
+  (and (match_code "const_int,const_vector")
       (match_test "aarch64_sve_cmp_immediate_p (op, false)")))
(define_predicate "aarch64_sve_index_immediate"
(and (match_code "const,const_vector")
(match_test "aarch64_sve_float_arith_immediate_p (op, false)")))
;; NOTE(review): diff hunk.  The negated-immediate predicate is renamed
;; to "..._float_negated_arith_immediate", and the old name becomes a
;; union of the plain and negated immediate predicates.
-(define_predicate "aarch64_sve_float_arith_with_sub_immediate"
+(define_predicate "aarch64_sve_float_negated_arith_immediate"
  (and (match_code "const,const_vector")
       (match_test "aarch64_sve_float_arith_immediate_p (op, true)")))
+(define_predicate "aarch64_sve_float_arith_with_sub_immediate"
+  (ior (match_operand 0 "aarch64_sve_float_arith_immediate")
+       (match_operand 0 "aarch64_sve_float_negated_arith_immediate")))
+
+
;; Constant vector usable as an SVE floating-point multiply immediate;
;; validity delegated to aarch64_sve_float_mul_immediate_p.
(define_predicate "aarch64_sve_float_mul_immediate"
  (and (match_code "const,const_vector")
       (match_test "aarch64_sve_float_mul_immediate_p (op)")))
;; NOTE(review): diff hunk.  The add operand tracks the
;; inc_dec -> vector_inc_dec predicate rename, and a new saturating-add
;; operand is introduced: register, or a QADD/QSUB-style immediate.
(define_predicate "aarch64_sve_add_operand"
  (ior (match_operand 0 "aarch64_sve_arith_operand")
       (match_operand 0 "aarch64_sve_sub_arith_immediate")
-       (match_operand 0 "aarch64_sve_inc_dec_immediate")))
+       (match_operand 0 "aarch64_sve_vector_inc_dec_immediate")))
+
+(define_predicate "aarch64_sve_sqadd_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_sve_qadd_immediate")
+       (match_operand 0 "aarch64_sve_qsub_immediate")))
(define_predicate "aarch64_sve_pred_and_operand"
(ior (match_operand 0 "register_operand")
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_sve_logical_immediate")))
;; Gather/scatter offset operands, one per element size: a register,
;; or the matching size-specific gather immediate defined above.
+(define_predicate "aarch64_sve_gather_offset_b"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_sve_gather_immediate_b")))
+
+(define_predicate "aarch64_sve_gather_offset_h"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_sve_gather_immediate_h")))
+
+(define_predicate "aarch64_sve_gather_offset_w"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_sve_gather_immediate_w")))
+
+(define_predicate "aarch64_sve_gather_offset_d"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "aarch64_sve_gather_immediate_d")))
+
+
;; Left-shift amount: a register, or a SIMD left-shift immediate.
(define_predicate "aarch64_sve_lshift_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "aarch64_simd_lshift_imm")))
(match_operand 0 "aarch64_sve_float_arith_immediate")))
;; NOTE(review): diff hunk.  The non-immediate alternative is narrowed
;; from the full float-arith operand predicate to plain registers.
(define_predicate "aarch64_sve_float_arith_with_sub_operand"
-  (ior (match_operand 0 "aarch64_sve_float_arith_operand")
+  (ior (match_operand 0 "register_operand")
       (match_operand 0 "aarch64_sve_float_arith_with_sub_immediate")))
(define_predicate "aarch64_sve_float_mul_operand"
(ior (match_test "INTVAL (op) == SVE_RELAXED_GP")
(match_test "INTVAL (op) == SVE_STRICT_GP"))))
;; Gather/scatter scale factors per element size: 1 (unscaled) or the
;; element size in bytes (b: only 1; h: 1 or 2; w: 1 or 4).
+(define_predicate "aarch64_gather_scale_operand_b"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) == 1")))
+
+(define_predicate "aarch64_gather_scale_operand_h"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) == 1 || INTVAL (op) == 2")))
+
(define_predicate "aarch64_gather_scale_operand_w"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 1 || INTVAL (op) == 4")))
;; Matches any of the listed integer binary RTX operators (arithmetic,
;; min/max, and bitwise logic).
(define_predicate "aarch64_sve_any_binary_operator"
  (match_code "plus,minus,mult,div,udiv,smax,umax,smin,umin,and,ior,xor"))
+
;; A (possibly poly) constant exactly equal to the SVE vector length in
;; bytes for the operand's mode.
+(define_predicate "aarch64_bytes_per_sve_vector_operand"
+  (and (match_code "const_int,const_poly_int")
+       (match_test "known_eq (wi::to_poly_wide (op, mode),
+		    BYTES_PER_SVE_VECTOR)")))
+
;; 4-bit tag offset, [0, 15] -- "memtag" in the name suggests the MTE
;; tag-manipulation instructions; confirm against their uses.
+(define_predicate "aarch64_memtag_tag_offset"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
+
;; Unsigned multiple of 16 in [0, 1008]: a 6-bit unsigned immediate
;; scaled by a 16-byte granule.
+(define_predicate "aarch64_granule16_uimm6"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 1008)
+		    && !(INTVAL (op) & 0xf)")))
+
;; Signed multiple of 16 in [-4096, 4080]: a 9-bit signed immediate
;; scaled by a 16-byte granule.
+(define_predicate "aarch64_granule16_simm9"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), -4096, 4080)
+		    && !(INTVAL (op) & 0xf)")))