From: Segher Boessenkool
Date: Tue, 4 Oct 2022 02:50:22 +0000 (+0000)
Subject: rs6000: Rework vsx_extract_<mode>
X-Git-Tag: basepoints/gcc-14~4108
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ba3e5a3826be53ecbb7d6044c50878d44640c296;p=thirdparty%2Fgcc.git

rs6000: Rework vsx_extract_<mode>

Extracting the left and right halves of a vector are entirely different
operations.  Things are simpler if they are separate define_insns, and
it is easy to get rid of the "wD" constraint use then.  This also gives
the variant that is a no-op copy its own alternative, of length 0 (and
thus, cost 0), making it more likely RA will choose it.

2022-10-05  Segher Boessenkool

	* config/rs6000/vsx.md (vsx_extract_<mode>): Replace define_insn by a
	define_expand.  Split the contents to...
	(*vsx_extract_<mode>_0): ... this.  Rewrite.
	(*vsx_extract_<mode>_1): ... and this.  Rewrite.
---

diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index 79a759b1ccf3..e0e34a78bca1 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -3388,59 +3388,53 @@
 ;; Optimize cases were we can do a simple or direct move.
 ;; Or see if we can avoid doing the move at all
-(define_insn "vsx_extract_<mode>"
-  [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand" "=wa, wa, wr, wr")
+(define_expand "vsx_extract_<mode>"
+  [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand")
 	(vec_select:<VS_scalar>
-	 (match_operand:VSX_D 1 "gpc_reg_operand" "wa, wa, wa, wa")
+	 (match_operand:VSX_D 1 "gpc_reg_operand")
 	 (parallel
-	  [(match_operand:QI 2 "const_0_to_1_operand" "wD, n, wD, n")])))]
+	  [(match_operand:QI 2 "const_0_to_1_operand")])))]
   "VECTOR_MEM_VSX_P (<MODE>mode)"
-{
-  int element = INTVAL (operands[2]);
-  int op0_regno = REGNO (operands[0]);
-  int op1_regno = REGNO (operands[1]);
-  int fldDM;
-
-  gcc_assert (IN_RANGE (element, 0, 1));
-  gcc_assert (VSX_REGNO_P (op1_regno));
-
-  if (element == VECTOR_ELEMENT_SCALAR_64BIT)
-    {
-      if (op0_regno == op1_regno)
-	return ASM_COMMENT_START " vec_extract to same register";
-
-      else if (INT_REGNO_P (op0_regno) && TARGET_DIRECT_MOVE
-	       && TARGET_POWERPC64)
-	return "mfvsrd %0,%x1";
+  "")
 
-      else if (FP_REGNO_P (op0_regno) && FP_REGNO_P (op1_regno))
-	return "fmr %0,%1";
+(define_insn "*vsx_extract_<mode>_0"
+  [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand" "=wa,wa,wr")
+	(vec_select:<VS_scalar>
+	 (match_operand:VSX_D 1 "gpc_reg_operand" "0,wa,wa")
+	 (parallel
+	  [(match_operand:QI 2 "const_0_to_1_operand" "n,n,n")])))]
+  "VECTOR_MEM_VSX_P (<MODE>mode)
+   && INTVAL (operands[2]) == (BYTES_BIG_ENDIAN ? 0 : 1)"
+{
+  if (which_alternative == 0)
+    return ASM_COMMENT_START " vec_extract to same register";
 
-      else if (VSX_REGNO_P (op0_regno))
-	return "xxlor %x0,%x1,%x1";
+  if (which_alternative == 2)
+    return "mfvsrd %0,%x1";
 
-      else
-	gcc_unreachable ();
-    }
+  return "xxlor %x0,%x1,%x1";
+}
+  [(set_attr "type" "*,veclogical,mfvsr")
+   (set_attr "isa" "*,*,p8v")
+   (set_attr "length" "0,*,*")])
 
-  else if (element == VECTOR_ELEMENT_MFVSRLD_64BIT && INT_REGNO_P (op0_regno)
-	   && TARGET_P9_VECTOR && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
+(define_insn "*vsx_extract_<mode>_1"
+  [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand" "=wa,wr")
+	(vec_select:<VS_scalar>
+	 (match_operand:VSX_D 1 "gpc_reg_operand" "wa,wa")
+	 (parallel
+	  [(match_operand:QI 2 "const_0_to_1_operand" "n,n")])))]
+  "VECTOR_MEM_VSX_P (<MODE>mode)
+   && INTVAL (operands[2]) == (BYTES_BIG_ENDIAN ? 1 : 0)"
+{
+  if (which_alternative == 1)
     return "mfvsrld %0,%x1";
 
-  else if (VSX_REGNO_P (op0_regno))
-    {
-      fldDM = element << 1;
-      if (!BYTES_BIG_ENDIAN)
-	fldDM = 3 - fldDM;
-      operands[3] = GEN_INT (fldDM);
-      return "xxpermdi %x0,%x1,%x1,%3";
-    }
-
-  else
-    gcc_unreachable ();
+  operands[3] = GEN_INT (BYTES_BIG_ENDIAN ? 2 : 3);
+  return "xxpermdi %x0,%x1,%x1,%3";
 }
-  [(set_attr "type" "veclogical,mfvsr,mfvsr,vecperm")
-   (set_attr "isa" "*,*,p8v,p9v")])
+  [(set_attr "type" "mfvsr,vecperm")
+   (set_attr "isa" "*,p9v")])
 
 ;; Optimize extracting a single scalar element from memory.
 (define_insn_and_split "*vsx_extract_<P:mode>_<VSX_D:mode>_load"