]> git.ipfire.org Git - thirdparty/gcc.git/commitdiff
rs6000: Simplify some conditions or code related to TARGET_DIRECT_MOVE
authorKewen Lin <linkw@linux.ibm.com>
Thu, 21 Nov 2024 07:41:33 +0000 (07:41 +0000)
committerKewen Lin <linkw@gcc.gnu.org>
Thu, 21 Nov 2024 07:41:33 +0000 (07:41 +0000)
When I was making a patch to rework TARGET_P8_VECTOR, I
noticed that there are some redundant checks and dead code
related to TARGET_DIRECT_MOVE, so I made this patch as one
separate preparatory patch; it consists of:
  - Check either TARGET_DIRECT_MOVE or TARGET_P8_VECTOR only
    according to the context, rather than checking both of
    them since they are actually the same (TARGET_DIRECT_MOVE
    is defined as TARGET_P8_VECTOR).
  - Simplify TARGET_VSX && TARGET_DIRECT_MOVE as
    TARGET_DIRECT_MOVE since direct move ensures VSX enabled.
  - Replace some TARGET_POWERPC64 && TARGET_DIRECT_MOVE with
    TARGET_DIRECT_MOVE_64BIT to simplify it.
  - Remove some dead code guarded by TARGET_DIRECT_MOVE
    where the condition never holds.

gcc/ChangeLog:

* config/rs6000/rs6000.cc (rs6000_option_override_internal): Simplify
TARGET_P8_VECTOR && TARGET_DIRECT_MOVE as TARGET_P8_VECTOR.
(rs6000_output_move_128bit): Simplify TARGET_VSX && TARGET_DIRECT_MOVE
as TARGET_DIRECT_MOVE.
* config/rs6000/rs6000.h (TARGET_XSCVDPSPN): Simplify conditions
TARGET_DIRECT_MOVE || TARGET_P8_VECTOR as TARGET_P8_VECTOR.
(TARGET_XSCVSPDPN): Likewise.
(TARGET_DIRECT_MOVE_128): Simplify TARGET_DIRECT_MOVE &&
TARGET_POWERPC64 as TARGET_DIRECT_MOVE_64BIT.
(TARGET_VEXTRACTUB): Likewise.
(TARGET_DIRECT_MOVE_64BIT): Simplify TARGET_P8_VECTOR &&
TARGET_DIRECT_MOVE as TARGET_DIRECT_MOVE.
* config/rs6000/rs6000.md (signbit<mode>2, @signbit<mode>2_dm,
*signbit<mode>2_dm_mem, floatsi<mode>2_lfiwax,
floatsi<SFDF:mode>2_lfiwax_<QHI:mode>_mem_zext,
floatunssi<mode>2_lfiwzx, float<QHI:mode><SFDF:mode>2,
*float<QHI:mode><SFDF:mode>2_internal, floatuns<QHI:mode><SFDF:mode>2,
*floatuns<QHI:mode><SFDF:mode>2_internal, p8_mtvsrd_v16qidi2,
p8_mtvsrd_df, p8_xxpermdi_<mode>, reload_vsx_from_gpr<mode>,
p8_mtvsrd_sf, reload_vsx_from_gprsf, p8_mfvsrd_3_<mode>,
reload_gpr_from_vsx<mode>, reload_gpr_from_vsxsf, unpack<mode>_dm):
Simplify TARGET_DIRECT_MOVE && TARGET_POWERPC64 as
TARGET_DIRECT_MOVE_64BIT.
(unpack<mode>_nodm): Simplify !TARGET_DIRECT_MOVE || !TARGET_POWERPC64
as !TARGET_DIRECT_MOVE_64BIT.
(fix_trunc<mode>si2, fix_trunc<mode>si2_stfiwx,
fix_trunc<mode>si2_internal): Simplify TARGET_P8_VECTOR &&
TARGET_DIRECT_MOVE as TARGET_DIRECT_MOVE.
(fix_trunc<mode>si2_stfiwx, fixuns_trunc<mode>si2_stfiwx): Remove some
dead code as the guard TARGET_DIRECT_MOVE there never holds.
(fixuns_trunc<mode>si2_stfiwx): Replace TARGET_P8_VECTOR with
TARGET_DIRECT_MOVE, which is a better fit.
* config/rs6000/vsx.md (define_peephole2 for SFmode in GPR): Simplify
TARGET_DIRECT_MOVE && TARGET_POWERPC64 as TARGET_DIRECT_MOVE_64BIT.

gcc/config/rs6000/rs6000.cc
gcc/config/rs6000/rs6000.h
gcc/config/rs6000/rs6000.md
gcc/config/rs6000/vsx.md

index 0d7ee1e5bdf2c372f3fad33e79117ed7ae0d5aa6..9cdf704824ce1c79dfa2be40c002de14e8ad5a99 100644 (file)
@@ -4055,7 +4055,7 @@ rs6000_option_override_internal (bool global_init_p)
      support. If we only have ISA 2.06 support, and the user did not specify
      the switch, leave it set to -1 so the movmisalign patterns are enabled,
      but we don't enable the full vectorization support  */
-  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
+  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR)
     TARGET_ALLOW_MOVMISALIGN = 1;
 
   else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
@@ -13799,7 +13799,7 @@ rs6000_output_move_128bit (rtx operands[])
                    ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
                    : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
 
-         else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
+         else if (TARGET_DIRECT_MOVE && src_vsx_p)
            return "#";
        }
 
index d460eb0654486330c68e8417f81eb28f8cd935d4..e0c41e1dfd26b510bc763a57fa86d7225168ca22 100644 (file)
@@ -469,13 +469,11 @@ extern int rs6000_vector_align[];
 
 /* TARGET_DIRECT_MOVE is redundant to TARGET_P8_VECTOR, so alias it to that.  */
 #define TARGET_DIRECT_MOVE     TARGET_P8_VECTOR
-#define TARGET_XSCVDPSPN       (TARGET_DIRECT_MOVE || TARGET_P8_VECTOR)
-#define TARGET_XSCVSPDPN       (TARGET_DIRECT_MOVE || TARGET_P8_VECTOR)
+#define TARGET_XSCVDPSPN       TARGET_P8_VECTOR
+#define TARGET_XSCVSPDPN       TARGET_P8_VECTOR
 #define TARGET_VADDUQM         (TARGET_P8_VECTOR && TARGET_POWERPC64)
-#define TARGET_DIRECT_MOVE_128 (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE \
-                                && TARGET_POWERPC64)
-#define TARGET_VEXTRACTUB      (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE \
-                                && TARGET_POWERPC64)
+#define TARGET_DIRECT_MOVE_128 (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT)
+#define TARGET_VEXTRACTUB      (TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT)
 
 /* Whether we should avoid (SUBREG:SI (REG:SF) and (SUBREG:SF (REG:SI).  */
 #define TARGET_NO_SF_SUBREG    TARGET_DIRECT_MOVE_64BIT
@@ -555,7 +553,6 @@ extern int rs6000_vector_align[];
    the calculation in 64-bit GPRs and then is transfered to the vector
    registers.  */
 #define TARGET_DIRECT_MOVE_64BIT       (TARGET_DIRECT_MOVE             \
-                                        && TARGET_P8_VECTOR            \
                                         && TARGET_POWERPC64)
 
 /* Inlining allows targets to define the meanings of bits in target_info
index 8eda2f7bb0d751114a57391f9f22e99b7ec66c97..2598059280bf1666cf397d650ca26d1d717d7531 100644 (file)
        (match_dup 6))]
   "TARGET_HARD_FLOAT
    && (!FLOAT128_IEEE_P (<MODE>mode)
-       || (TARGET_POWERPC64 && TARGET_DIRECT_MOVE))"
+       || TARGET_DIRECT_MOVE_64BIT)"
 {
   if (FLOAT128_IEEE_P (<MODE>mode))
     {
   [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
        (unspec:DI [(match_operand:SIGNBIT 1 "gpc_reg_operand" "wa,r")]
                   UNSPEC_SIGNBIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "@
    mfvsrd %0,%x1
    #"
   [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
        (unspec:DI [(match_operand:SIGNBIT 1 "memory_operand" "m")]
                   UNSPEC_SIGNBIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& 1"
   [(set (match_dup 0)
   rtx src = operands[1];
   rtx tmp;
 
-  if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
+  if (!MEM_P (src) && TARGET_DIRECT_MOVE_64BIT)
     tmp = convert_to_mode (DImode, src, false);
   else
     {
          (match_operand:QHI 1 "indexed_or_indirect_operand" "Z,Z"))))
    (clobber (match_scratch:DI 2 "=d,wa"))]
   "TARGET_HARD_FLOAT && <SI_CONVERT_FP> && TARGET_P9_VECTOR
-   && TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+   && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& 1"
   [(pc)]
   rtx src = operands[1];
   rtx tmp;
 
-  if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
+  if (!MEM_P (src) && TARGET_DIRECT_MOVE_64BIT)
     tmp = convert_to_mode (DImode, src, true);
   else
     {
              (clobber (match_scratch:DI 2))
              (clobber (match_scratch:DI 3))
              (clobber (match_scratch:<QHI:MODE> 4))])]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
 {
   if (MEM_P (operands[1]))
     operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
    (clobber (match_scratch:DI 2 "=v,wa,v"))
    (clobber (match_scratch:DI 3 "=X,r,X"))
    (clobber (match_scratch:<QHI:MODE> 4 "=X,X,v"))]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
                    (match_operand:QHI 1 "input_operand")))
              (clobber (match_scratch:DI 2))
              (clobber (match_scratch:DI 3))])]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
 {
   if (MEM_P (operands[1]))
     operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
         (match_operand:QHI 1 "reg_or_indexed_operand" "v,r,Z")))
    (clobber (match_scratch:DI 2 "=v,wa,wa"))
    (clobber (match_scratch:DI 3 "=X,r,X"))]
-  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
+  "TARGET_P9_VECTOR && TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
        (fix:SI (match_operand:SFDF 1 "gpc_reg_operand")))]
   "TARGET_HARD_FLOAT"
 {
-  if (!(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE))
+  if (!TARGET_DIRECT_MOVE)
     {
       rtx src = force_reg (<MODE>mode, operands[1]);
 
        (fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d")))
    (clobber (match_scratch:DI 2 "=d"))]
   "TARGET_HARD_FLOAT && TARGET_STFIWX && can_create_pseudo_p ()
-   && !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
       emit_insn (gen_stfiwx (dest, tmp));
       DONE;
     }
-  else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE && !MEM_P (dest))
-    {
-      dest = gen_lowpart (DImode, dest);
-      emit_move_insn (dest, tmp);
-      DONE;
-    }
   else
     {
       rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
    (clobber (match_operand:DI 2 "gpc_reg_operand" "=1,d"))
    (clobber (match_operand:DI 3 "offsettable_mem_operand" "=o,o"))]
   "TARGET_HARD_FLOAT
-   && !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
    (clobber (match_scratch:DI 2 "=d"))]
   "TARGET_HARD_FLOAT && TARGET_FCTIWUZ
    && TARGET_STFIWX && can_create_pseudo_p ()
-   && !TARGET_P8_VECTOR"
+   && !TARGET_DIRECT_MOVE"
   "#"
   "&& 1"
   [(pc)]
       emit_insn (gen_stfiwx (dest, tmp));
       DONE;
     }
-  else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
-    {
-      dest = gen_lowpart (DImode, dest);
-      emit_move_insn (dest, tmp);
-      DONE;
-    }
   else
     {
       rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
   [(set (match_operand:V16QI 0 "register_operand" "=wa")
     (unspec:V16QI [(match_operand:DI 1 "register_operand" "r")]
                  UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
   [(set (match_operand:DF 0 "register_operand" "=wa")
        (unspec:DF [(match_operand:DI 1 "register_operand" "r")]
                   UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
                (match_operand:DF 1 "register_operand" "wa")
                (match_operand:DF 2 "register_operand" "wa")]
                UNSPEC_P8V_XXPERMDI))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "xxpermdi %x0,%x1,%x2,0"
   [(set_attr "type" "vecperm")])
 
         [(match_operand:FMOVE128_GPR 1 "register_operand" "r")]
         UNSPEC_P8V_RELOAD_FROM_GPR))
    (clobber (match_operand:IF 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
   [(set (match_operand:SF 0 "register_operand" "=wa")
        (unspec:SF [(match_operand:DI 1 "register_operand" "r")]
                   UNSPEC_P8V_MTVSRD))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mtvsrd %x0,%1"
   [(set_attr "type" "mtvsr")])
 
        (unspec:SF [(match_operand:SF 1 "register_operand" "r")]
                   UNSPEC_P8V_RELOAD_FROM_GPR))
    (clobber (match_operand:DI 2 "register_operand" "=r"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
   [(set (match_operand:DF 0 "register_operand" "=r")
        (unspec:DF [(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
                   UNSPEC_P8V_RELOAD_FROM_VSX))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "mfvsrd %0,%x1"
   [(set_attr "type" "mfvsr")])
 
         [(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
         UNSPEC_P8V_RELOAD_FROM_VSX))
    (clobber (match_operand:FMOVE128_GPR 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
        (unspec:SF [(match_operand:SF 1 "register_operand" "wa")]
                   UNSPEC_P8V_RELOAD_FROM_VSX))
    (clobber (match_operand:V4SF 2 "register_operand" "=wa"))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
+  "TARGET_DIRECT_MOVE_64BIT"
   "#"
   "&& reload_completed"
   [(const_int 0)]
         [(match_operand:FMOVE128 1 "register_operand" "d,d,r,d,r")
          (match_operand:QI 2 "const_0_to_1_operand" "i,i,i,i,i")]
         UNSPEC_UNPACK_128BIT))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE && FLOAT128_2REG_P (<MODE>mode)"
+  "TARGET_DIRECT_MOVE_64BIT && FLOAT128_2REG_P (<MODE>mode)"
   "#"
   "&& reload_completed"
   [(set (match_dup 0) (match_dup 3))]
         [(match_operand:FMOVE128 1 "register_operand" "d,d,r")
          (match_operand:QI 2 "const_0_to_1_operand" "i,i,i")]
         UNSPEC_UNPACK_128BIT))]
-  "(!TARGET_POWERPC64 || !TARGET_DIRECT_MOVE) && FLOAT128_2REG_P (<MODE>mode)"
+  "!TARGET_DIRECT_MOVE_64BIT && FLOAT128_2REG_P (<MODE>mode)"
   "#"
   "&& reload_completed"
   [(set (match_dup 0) (match_dup 3))]
index b2fc39acf4e84148fdb04fc53eddd51eaed77b1e..f4f7113f5fe8efdd1a4a3093e3fdfe6a149f2060 100644 (file)
    (set (match_operand:SF SFBOOL_MTVSR_D "vsx_register_operand")
        (unspec:SF [(match_dup SFBOOL_SHL_D)] UNSPEC_P8V_MTVSRD))]
 
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE
+  "TARGET_DIRECT_MOVE_64BIT
    /* The REG_P (xxx) tests prevents SUBREG's, which allows us to use REGNO
       to compare registers, when the mode is different.  */
    && REG_P (operands[SFBOOL_MFVSR_D]) && REG_P (operands[SFBOOL_BOOL_D])