cleanup: Change condition order
author    Juzhe-Zhong <juzhe.zhong@rivai.ai>
          Fri, 21 Jul 2023 02:23:43 +0000 (10:23 +0800)
committer Lehua Ding <lehua.ding@rivai.ai>
          Fri, 21 Jul 2023 08:34:50 +0000 (16:34 +0800)
Hi, Richard and Richi.

I have double-checked the recent code for len && mask support again.

Some places use this code structure:

if (len_mask_fn)
...
else if (mask_fn)
...

while other places use:

if (mask_fn)
...
else if (len_mask_fn)
...

Based on a previous review comment from Richi:
https://gcc.gnu.org/pipermail/gcc-patches/2023-July/625067.html

the LEN && MASK stuff should be checked before the plain MASK stuff.

So I reordered all the conditions to check the LEN MASK variants before the MASK variants.
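
For illustration, a minimal self-contained C++ sketch of the intended
fall-through order (the predicate names and printed actions here are
hypothetical stand-ins, not GCC's internal API):

#include <cstdio>

/* Hypothetical capability queries standing in for GCC's
   internal_gather_scatter_fn_supported_p /
   direct_internal_fn_supported_p checks.  */
static bool target_supports_mask_len_fn () { return true; }
static bool target_supports_mask_fn () { return true; }

int
main ()
{
  /* Check the combined LEN+MASK variant first, so a target that
     supports both records a loop length rather than a loop mask.  */
  if (target_supports_mask_len_fn ())
    printf ("record loop len\n");
  else if (target_supports_mask_fn ())
    printf ("record loop mask\n");
  else
    printf ("cannot use partial vectors\n");
  return 0;
}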

This is the last cleanup patch.

Bootstrap and regression testing are on the way.

gcc/ChangeLog:

* tree-vect-stmts.cc (check_load_store_for_partial_vectors): Change condition order.
(vectorizable_operation): Ditto.

gcc/tree-vect-stmts.cc

index 2555958ab8f37a54c3ca5616aa2bab88e480177b..ed28fbdced33897ecb72ca9e06ddc6f424b563d6 100644
@@ -1635,17 +1635,17 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
       internal_fn len_ifn = (is_load
                             ? IFN_MASK_LEN_GATHER_LOAD
                             : IFN_MASK_LEN_SCATTER_STORE);
-      if (internal_gather_scatter_fn_supported_p (ifn, vectype,
+      if (internal_gather_scatter_fn_supported_p (len_ifn, vectype,
                                                  gs_info->memory_type,
                                                  gs_info->offset_vectype,
                                                  gs_info->scale))
-       vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
-                              scalar_mask);
-      else if (internal_gather_scatter_fn_supported_p (len_ifn, vectype,
+       vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
+      else if (internal_gather_scatter_fn_supported_p (ifn, vectype,
                                                       gs_info->memory_type,
                                                       gs_info->offset_vectype,
                                                       gs_info->scale))
-       vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, 1);
+       vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
+                              scalar_mask);
       else
        {
          if (dump_enabled_p ())
@@ -6598,16 +6598,16 @@ vectorizable_operation (vec_info *vinfo,
          && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
          && mask_out_inactive)
        {
-         if (cond_fn != IFN_LAST
-             && direct_internal_fn_supported_p (cond_fn, vectype,
+         if (cond_len_fn != IFN_LAST
+             && direct_internal_fn_supported_p (cond_len_fn, vectype,
                                                 OPTIMIZE_FOR_SPEED))
-           vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
-                                  vectype, NULL);
-         else if (cond_len_fn != IFN_LAST
-                  && direct_internal_fn_supported_p (cond_len_fn, vectype,
-                                                     OPTIMIZE_FOR_SPEED))
            vect_record_loop_len (loop_vinfo, lens, ncopies * vec_num, vectype,
                                  1);
+         else if (cond_fn != IFN_LAST
+                  && direct_internal_fn_supported_p (cond_fn, vectype,
+                                                     OPTIMIZE_FOR_SPEED))
+           vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
+                                  vectype, NULL);
          else
            {
              if (dump_enabled_p ())