;; Predicate definitions for S/390 and zSeries.
-;; Copyright (C) 2005 Free Software Foundation, Inc.
+;; Copyright (C) 2005-2020 Free Software Foundation, Inc.
;; Contributed by Hartmut Penner (hpenner@de.ibm.com) and
;; Ulrich Weigand (uweigand@de.ibm.com).
;;
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 2, or (at your option)
+;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 59 Temple Place - Suite 330,
-;; Boston, MA 02111-1307, USA.
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
;; OP is the current operation.
;; MODE is the current operation mode.
;; operands --------------------------------------------------------------
-;; Return true if OP a (const_int 0) operand.
-
+;; Return true if OP is a const 0 operand (int/float/vector).
(define_predicate "const0_operand"
- (and (match_code "const_int, const_double")
+ (and (match_code "const_int,const_wide_int,const_double,const_vector")
(match_test "op == CONST0_RTX (mode)")))
+;; Return true if OP is an all-ones operand (int/vector).
+(define_predicate "all_ones_operand"
+ (and (match_code "const_int, const_wide_int, const_vector")
+ (match_test "INTEGRAL_MODE_P (GET_MODE (op))")
+ (match_test "op == CONSTM1_RTX (mode)")))
+
+;; Return true if OP is a 4 bit mask operand.
+(define_predicate "const_mask_operand"
+ (and (match_code "const_int")
+ (match_test "UINTVAL (op) < 16")))
+
;; Return true if OP is constant.
(define_special_predicate "consttable_operand"
- (and (match_code "symbol_ref, label_ref, const, const_int, const_double")
+ (and (match_code "symbol_ref, label_ref, const, const_int, const_wide_int, const_double, const_vector")
(match_test "CONSTANT_P (op)")))
+; An operand used as a vector permutation pattern.
+
+; This in particular accepts constants which would otherwise be
+; rejected.  These constants require special post-reload handling.
+
+(define_special_predicate "permute_pattern_operand"
+ (and (match_code "const_vector,mem,reg,subreg")
+ (match_test "GET_MODE (op) == V16QImode")
+ (match_test "!MEM_P (op) || s390_mem_constraint (\"R\", op)")))
+
;; Return true if OP is a valid S-type operand.
(define_predicate "s_operand"
return true;
})
+;; Return true if the address of the mem operand plus 16 is still a
+;; valid Q constraint address.
+
+(define_predicate "plus16_Q_operand"
+  (and (match_code "mem")
+       (match_operand 0 "general_operand"))
+{
+  rtx addr = XEXP (op, 0);
+  /* A bare base register has displacement 0, so +16 trivially fits.  */
+  if (REG_P (addr))
+    return true;
+
+  /* Otherwise only base register + constant displacement is accepted.  */
+  if (GET_CODE (addr) != PLUS
+      || !REG_P (XEXP (addr, 0))
+      || !CONST_INT_P (XEXP (addr, 1)))
+    return false;
+
+  /* The displacement bumped by 16 must still be in short-disp range.  */
+  return SHORT_DISP_IN_RANGE (INTVAL (XEXP (addr, 1)) + 16);
+})
+
;; Return true if OP is a valid operand for the BRAS instruction.
;; Allow SYMBOL_REFs and @PLT stubs.
(define_special_predicate "bras_sym_operand"
- (ior (match_code "symbol_ref")
+ (ior (and (match_code "symbol_ref")
+ (match_test "!flag_pic || SYMBOL_REF_LOCAL_P (op)"))
(and (match_code "const")
(and (match_test "GET_CODE (XEXP (op, 0)) == UNSPEC")
(match_test "XINT (XEXP (op, 0), 1) == UNSPEC_PLT")))))
(and (match_test "mode == Pmode")
(match_test "!legitimate_la_operand_p (op)"))))
-;; Return true if OP is a valid shift count operand.
+;; Return true if OP is valid as a scalar shift count or setmem operand.
-(define_predicate "shift_count_operand"
+(define_predicate "setmem_operand"
(match_code "reg, subreg, plus, const_int")
{
- HOST_WIDE_INT offset = 0;
+ HOST_WIDE_INT offset;
+ rtx base;
- /* We can have an integer constant, an address register,
- or a sum of the two. Note that reload already checks
- that any register present is an address register, so
- we just check for any register here. */
- if (GET_CODE (op) == CONST_INT)
- {
- offset = INTVAL (op);
- op = NULL_RTX;
- }
- if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
- {
- offset = INTVAL (XEXP (op, 1));
- op = XEXP (op, 0);
- }
- while (op && GET_CODE (op) == SUBREG)
- op = SUBREG_REG (op);
- if (op && GET_CODE (op) != REG)
+ if (GET_MODE (op) != VOIDmode
+ && GET_MODE_CLASS (GET_MODE (op)) != MODE_INT)
+ return false;
+
+ /* Extract base register and offset. */
+ if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
+ return false;
+
+ /* Don't allow any non-base hard registers. Doing so without
+ confusing reload and/or regrename would be tricky, and doesn't
+ buy us much anyway. */
+ if (base && REGNO (base) < FIRST_PSEUDO_REGISTER && !ADDR_REG_P (base))
return false;
/* Unfortunately we have to reject constants that are invalid
return true;
})
+; An integer operand with the lowest order 6 bits all ones.
+(define_predicate "const_int_6bitset_operand"
+ (and (match_code "const_int")
+ (match_test "(INTVAL (op) & 63) == 63")))
+;; Return true if OP is a const_int shift count in the range
+;; [1, GET_MODE_BITSIZE (mode) - 1], i.e. one known to be neither
+;; zero nor out of range for the mode.
+(define_predicate "nonzero_shift_count_operand"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 1, GET_MODE_BITSIZE (mode) - 1)")))
+
;; Return true if OP a valid operand for the LARL instruction.
(define_predicate "larl_operand"
- (match_code "label_ref, symbol_ref, const, const_int, const_double")
+ (match_code "label_ref, symbol_ref, const")
{
/* Allow labels and local symbols. */
if (GET_CODE (op) == LABEL_REF)
return true;
- if (GET_CODE (op) == SYMBOL_REF)
- return ((SYMBOL_REF_FLAGS (op) & SYMBOL_FLAG_ALIGN1) == 0
+ if (SYMBOL_REF_P (op))
+ return (!SYMBOL_FLAG_NOTALIGN2_P (op)
&& SYMBOL_REF_TLS_MODEL (op) == 0
- && (!flag_pic || SYMBOL_REF_LOCAL_P (op)));
+ && s390_rel_address_ok_p (op));
/* Everything else must have a CONST, so strip it. */
if (GET_CODE (op) != CONST)
if (GET_CODE (XEXP (op, 1)) != CONST_INT
|| (INTVAL (XEXP (op, 1)) & 1) != 0)
return false;
- if (INTVAL (XEXP (op, 1)) >= (HOST_WIDE_INT)1 << 32
- || INTVAL (XEXP (op, 1)) < -((HOST_WIDE_INT)1 << 32))
+ if (INTVAL (XEXP (op, 1)) >= HOST_WIDE_INT_1 << 31
+ || INTVAL (XEXP (op, 1)) < -(HOST_WIDE_INT_1 << 31))
return false;
op = XEXP (op, 0);
}
/* Labels and local symbols allowed here as well. */
if (GET_CODE (op) == LABEL_REF)
return true;
- if (GET_CODE (op) == SYMBOL_REF)
- return ((SYMBOL_REF_FLAGS (op) & SYMBOL_FLAG_ALIGN1) == 0
+ if (SYMBOL_REF_P (op))
+ return (!SYMBOL_FLAG_NOTALIGN2_P (op)
&& SYMBOL_REF_TLS_MODEL (op) == 0
- && (!flag_pic || SYMBOL_REF_LOCAL_P (op)));
+ && s390_rel_address_ok_p (op));
+
/* Now we must have a @GOTENT offset or @PLT stub
or an @INDNTPOFF TLS offset. */
return false;
})
+; A contiguous-bitmask constant; wraparound of the bit range is allowed.
+(define_predicate "contiguous_bitmask_operand"
+ (match_code "const_int")
+{
+ return s390_contiguous_bitmask_p (INTVAL (op), true,
+ GET_MODE_BITSIZE (mode), NULL, NULL);
+})
+
+; Same without wraparound.
+(define_predicate "contiguous_bitmask_nowrap_operand"
+ (match_code "const_int")
+{
+ return s390_contiguous_bitmask_p
+ (INTVAL (op), false, GET_MODE_BITSIZE (mode), NULL, NULL);
+})
+
+;; Return true if OP is legitimate for any LOC instruction.
+
+(define_predicate "loc_operand"
+ (ior (match_operand 0 "nonimmediate_operand")
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) <= 32767 && INTVAL (op) >= -32768"))))
+
+;; Return true if OP is a const_wide_int accepted by
+;; legitimate_reload_constant_p, i.e. a constant legitimate for reload.
+(define_predicate "reload_const_wide_int_operand"
+  (and (match_code "const_wide_int")
+       (match_test "legitimate_reload_constant_p (op)")))
+
+
;; operators --------------------------------------------------------------
;; Return nonzero if OP is a valid comparison operator
{
if (GET_CODE (XEXP (op, 0)) != REG
|| REGNO (XEXP (op, 0)) != CC_REGNUM
- || XEXP (op, 1) != const0_rtx)
+ || (XEXP (op, 1) != const0_rtx
+ && !(CONST_INT_P (XEXP (op, 1))
+ && GET_MODE (XEXP (op, 0)) == CCRAWmode
+ && INTVAL (XEXP (op, 1)) >= 0
+ && INTVAL (XEXP (op, 1)) <= 15)))
return false;
return (s390_branch_condition_mask (op) >= 0);
})
+;; Return true if op is the cc register.
+(define_predicate "cc_reg_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == CC_REGNUM")))
+
+;; Return true if OP is a signed integer comparison code (eq/ne/lt/gt/le/ge)
+;; for which s390_compare_and_branch_condition_mask yields a valid mask.
+(define_predicate "s390_signed_integer_comparison"
+  (match_code "eq, ne, lt, gt, le, ge")
+{
+  return (s390_compare_and_branch_condition_mask (op) >= 0);
+})
+
+;; Return true if OP is an unsigned integer comparison code
+;; (eq/ne/ltu/gtu/leu/geu) for which
+;; s390_compare_and_branch_condition_mask yields a valid mask.
+(define_predicate "s390_unsigned_integer_comparison"
+  (match_code "eq, ne, ltu, gtu, leu, geu")
+{
+  return (s390_compare_and_branch_condition_mask (op) >= 0);
+})
+
+;; Return nonzero if OP is a valid comparison operator for the
+;; cstore expanders -- respectively cstorecc4 and integer cstore.
+(define_predicate "s390_eqne_operator"
+ (match_code "eq, ne"))
+
+(define_predicate "s390_scond_operator"
+ (match_code "ltu, gtu, leu, geu"))
+
+(define_predicate "s390_brx_operator"
+ (match_code "le, gt"))
+
;; Return nonzero if OP is a valid comparison operator
;; for an ALC condition.
if (GET_CODE (XEXP (op, 0)) != REG
|| REGNO (XEXP (op, 0)) != CC_REGNUM
- || XEXP (op, 1) != const0_rtx)
+ || (XEXP (op, 1) != const0_rtx
+ && !(CONST_INT_P (XEXP (op, 1))
+ && GET_MODE (XEXP (op, 0)) == CCRAWmode
+ && INTVAL (XEXP (op, 1)) >= 0
+ && INTVAL (XEXP (op, 1)) <= 15)))
return false;
switch (GET_MODE (XEXP (op, 0)))
{
- case CCL1mode:
+ case E_CCL1mode:
return GET_CODE (op) == LTU;
- case CCL2mode:
+ case E_CCL2mode:
return GET_CODE (op) == LEU;
- case CCL3mode:
+ case E_CCL3mode:
return GET_CODE (op) == GEU;
- case CCUmode:
+ case E_CCUmode:
return GET_CODE (op) == GTU;
- case CCURmode:
+ case E_CCURmode:
return GET_CODE (op) == LTU;
- case CCSmode:
+ case E_CCSmode:
return GET_CODE (op) == UNGT;
- case CCSRmode:
+ case E_CCSRmode:
return GET_CODE (op) == UNLT;
default:
switch (GET_MODE (XEXP (op, 0)))
{
- case CCL1mode:
+ case E_CCL1mode:
return GET_CODE (op) == GEU;
- case CCL2mode:
+ case E_CCL2mode:
return GET_CODE (op) == GTU;
- case CCL3mode:
+ case E_CCL3mode:
return GET_CODE (op) == LTU;
- case CCUmode:
+ case E_CCUmode:
return GET_CODE (op) == LEU;
- case CCURmode:
+ case E_CCURmode:
return GET_CODE (op) == GEU;
- case CCSmode:
+ case E_CCSmode:
return GET_CODE (op) == LE;
- case CCSRmode:
+ case E_CCSRmode:
return GET_CODE (op) == GE;
default:
(define_special_predicate "load_multiple_operation"
(match_code "parallel")
{
- enum machine_mode elt_mode;
+ machine_mode elt_mode;
int count = XVECLEN (op, 0);
unsigned int dest_regno;
rtx src_addr;
return true;
})
+;; For an execute pattern the target instruction is embedded into the
+;; RTX but will not get checked for validity by recog automatically.
+;; The execute_operation predicate extracts the target RTX and invokes
+;; recog.
+(define_special_predicate "execute_operation"
+ (match_code "parallel")
+{
+ rtx pattern = op;
+ rtx_insn *insn;
+ int icode;
+
+ /* This is redundant but since this predicate is evaluated
+ first when recognizing the insn we can prevent the more
+ expensive code below from being executed for many cases. */
+ if (GET_CODE (XVECEXP (pattern, 0, 0)) != UNSPEC
+ || XINT (XVECEXP (pattern, 0, 0), 1) != UNSPEC_EXECUTE)
+ return false;
+
+ /* Keep in sync with s390_execute_target. */
+ if (XVECLEN (pattern, 0) == 2)
+ {
+ pattern = copy_rtx (XVECEXP (pattern, 0, 1));
+ }
+ else
+ {
+ rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
+ int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
+ RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
+
+ pattern = gen_rtx_PARALLEL (VOIDmode, vec);
+ }
+
+ /* Since we do not have the wrapping insn here we have to build one. */
+ insn = make_insn_raw (pattern);
+ icode = recog_memoized (insn);
+ if (icode < 0)
+ return false;
+
+ extract_constrain_insn (insn);
+
+ return which_alternative >= 0;
+})
+
;; Return true if OP is a store multiple operation. It is known to be a
;; PARALLEL and the first section will be tested.
(define_special_predicate "store_multiple_operation"
(match_code "parallel")
{
- enum machine_mode elt_mode;
+ machine_mode elt_mode;
int count = XVECLEN (op, 0);
unsigned int src_regno;
rtx dest_addr;
}
return true;
})
+
+;; Return true if OP is a const_int in [0, 128] that is a multiple of 8,
+;; i.e. a shift amount expressible as a whole number of bytes.
+(define_predicate "const_shift_by_byte_operand"
+  (match_code "const_int")
+{
+  unsigned HOST_WIDE_INT val = INTVAL (op);
+  return val <= 128 && val % 8 == 0;
+})
+
+;; Certain operations (e.g. CS) cannot access SYMBOL_REF directly, it needs to
+;; be loaded into some register first. In theory, if we put a SYMBOL_REF into
+;; a corresponding insn anyway, reload will generate a load for it, but, when
+;; coupled with constant propagation, this will lead to an inefficient code
+;; (see PR 80080).
+
+(define_predicate "nonsym_memory_operand"
+ (match_code "mem")
+{
+ return memory_operand (op, mode) && !contains_symbol_ref_p (op);
+})
+
+;; Check for a valid shift count operand with an implicit
+;; shift truncation mask of 63.
+
+(define_predicate "shift_count_operand"
+ (and (match_code "reg, subreg, and, plus, const_int")
+ (match_test "CONST_INT_P (op) || GET_MODE (op) == E_QImode"))
+{
+ return s390_valid_shift_count (op, 63);
+}
+)
+
+;; This is used as operand predicate. As we do not know
+;; the mode of the first operand here and the shift truncation
+;; mask depends on the mode, we cannot check the mask.
+;; This is supposed to happen in the insn condition which
+;; calls s390_valid_shift_count with the proper mode size.
+;; We need two separate predicates for non-vector and vector
+;; shifts since the (less restrictive) insn condition is checked
+;; after the more restrictive operand predicate which will
+;; disallow the operand before we can check the condition.
+
+(define_predicate "shift_count_operand_vec"
+ (and (match_code "reg, subreg, and, plus, const_int")
+ (match_test "CONST_INT_P (op) || GET_MODE (op) == E_QImode"))
+{
+ return s390_valid_shift_count (op, 0);
+}
+)
+
+; An integer constant which can be used in a signed add with overflow
+; pattern without being reloaded.
+(define_predicate "addv_const_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -32768 && INTVAL (op) <= 32767")))