git.ipfire.org Git - thirdparty/gcc.git/commitdiff
sse.md (avx512f_load<mode>_mask): Emit vmovup{s,d} or vmovdqu* for misaligned_operand.
author Jakub Jelinek <jakub@gcc.gnu.org>
Sat, 4 Jan 2014 09:57:36 +0000 (10:57 +0100)
committer Jakub Jelinek <jakub@gcc.gnu.org>
Sat, 4 Jan 2014 09:57:36 +0000 (10:57 +0100)
* config/i386/sse.md (avx512f_load<mode>_mask): Emit vmovup{s,d}
or vmovdqu* for misaligned_operand.
(<sse>_loadu<ssemodesuffix><avxsizesuffix><mask_name>,
<sse2_avx_avx512f>_loaddqu<mode><mask_name>): Handle <mask_applied>.
* config/i386/i386.c (ix86_expand_special_args_builtin): Set
aligned_mem for AVX512F masked aligned load and store builtins and for
non-temporal moves.

* gcc.target/i386/avx512f-vmovdqu32-1.c: Allow vmovdqu64 instead of
vmovdqu32.

From-SVN: r206332

gcc/ChangeLog
gcc/config/i386/i386.c
gcc/config/i386/sse.md
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/i386/avx512f-vmovdqu32-1.c

index cf4cfabeb8200340b951e203b8df705fa641a13f..caae1f6f473acfdf6277ddab9d494bc239e7b6b0 100644 (file)
@@ -1,4 +1,14 @@
-2014-01-03  Bingfeng Mei <bmei@broadcom.com>
+2014-01-04  Jakub Jelinek  <jakub@redhat.com>
+
+       * config/i386/sse.md (avx512f_load<mode>_mask): Emit vmovup{s,d}
+       or vmovdqu* for misaligned_operand.
+       (<sse>_loadu<ssemodesuffix><avxsizesuffix><mask_name>,
+       <sse2_avx_avx512f>_loaddqu<mode><mask_name>): Handle <mask_applied>.
+       * config/i386/i386.c (ix86_expand_special_args_builtin): Set
+       aligned_mem for AVX512F masked aligned load and store builtins and for
+       non-temporal moves.
+
+2014-01-03  Bingfeng Mei  <bmei@broadcom.com>
 
        PR tree-optimization/59651
        * tree-vect-loop-manip.c (vect_create_cond_for_alias_checks): 
index d2f5b6e9fda70a9f984284b06be6aa96da08cc0a..1fc68e144bbe6d5001722892cdbe0b328986dc3f 100644 (file)
@@ -34407,6 +34407,9 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
        case CODE_FOR_sse2_movntidi:
        case CODE_FOR_sse_movntq:
        case CODE_FOR_sse2_movntisi:
+       case CODE_FOR_avx512f_movntv16sf:
+       case CODE_FOR_avx512f_movntv8df:
+       case CODE_FOR_avx512f_movntv8di:
          aligned_mem = true;
          break;
        default:
@@ -34431,6 +34434,24 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
       klass = load;
       memory = 0;
       break;
+    case VOID_FTYPE_PV8DF_V8DF_QI:
+    case VOID_FTYPE_PV16SF_V16SF_HI:
+    case VOID_FTYPE_PV8DI_V8DI_QI:
+    case VOID_FTYPE_PV16SI_V16SI_HI:
+      switch (icode)
+       {
+       /* These builtins and instructions require the memory
+          to be properly aligned.  */
+       case CODE_FOR_avx512f_storev16sf_mask:
+       case CODE_FOR_avx512f_storev16si_mask:
+       case CODE_FOR_avx512f_storev8df_mask:
+       case CODE_FOR_avx512f_storev8di_mask:
+         aligned_mem = true;
+         break;
+       default:
+         break;
+       }
+      /* FALLTHRU */
     case VOID_FTYPE_PV8SF_V8SI_V8SF:
     case VOID_FTYPE_PV4DF_V4DI_V4DF:
     case VOID_FTYPE_PV4SF_V4SI_V4SF:
@@ -34439,10 +34460,6 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
     case VOID_FTYPE_PV4DI_V4DI_V4DI:
     case VOID_FTYPE_PV4SI_V4SI_V4SI:
     case VOID_FTYPE_PV2DI_V2DI_V2DI:
-    case VOID_FTYPE_PV8DF_V8DF_QI:
-    case VOID_FTYPE_PV16SF_V16SF_HI:
-    case VOID_FTYPE_PV8DI_V8DI_QI:
-    case VOID_FTYPE_PV16SI_V16SI_HI:
     case VOID_FTYPE_PDOUBLE_V2DF_QI:
     case VOID_FTYPE_PFLOAT_V4SF_QI:
       nargs = 2;
@@ -34459,6 +34476,19 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
       nargs = 3;
       klass = load;
       memory = 0;
+      switch (icode)
+       {
+       /* These builtins and instructions require the memory
+          to be properly aligned.  */
+       case CODE_FOR_avx512f_loadv16sf_mask:
+       case CODE_FOR_avx512f_loadv16si_mask:
+       case CODE_FOR_avx512f_loadv8df_mask:
+       case CODE_FOR_avx512f_loadv8di_mask:
+         aligned_mem = true;
+         break;
+       default:
+         break;
+       }
       break;
     case VOID_FTYPE_UINT_UINT_UINT:
     case VOID_FTYPE_UINT64_UINT_UINT:
index 405f9988d9bf410688a9a797a1bd8725abe5fc93..dfc98ba813adfcf9612ecc6109c9fc793c2f1ef9 100644 (file)
     {
     case MODE_V8DF:
     case MODE_V16SF:
+      if (misaligned_operand (operands[1], <MODE>mode))
+       return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
       return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
     default:
+      if (misaligned_operand (operands[1], <MODE>mode))
+       return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
       return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
     }
 }
      false, still emit UNSPEC_LOADU insn to honor user's request for
      misaligned load.  */
   if (TARGET_AVX
-      && misaligned_operand (operands[1], <MODE>mode)
-      /* FIXME: Revisit after AVX512F merge is completed.  */
-      && !<mask_applied>)
+      && misaligned_operand (operands[1], <MODE>mode))
     {
-      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+      rtx src = operands[1];
+      if (<mask_applied>)
+       src = gen_rtx_VEC_MERGE (<MODE>mode, operands[1],
+                                operands[2 * <mask_applied>],
+                                operands[3 * <mask_applied>]);
+      emit_insn (gen_rtx_SET (VOIDmode, operands[0], src));
       DONE;
     }
 })
      false, still emit UNSPEC_LOADU insn to honor user's request for
      misaligned load.  */
   if (TARGET_AVX
-      && misaligned_operand (operands[1], <MODE>mode)
-      /* FIXME: Revisit after AVX512F merge is completed.  */
-      && !<mask_applied>)
+      && misaligned_operand (operands[1], <MODE>mode))
     {
-      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+      rtx src = operands[1];
+      if (<mask_applied>)
+       src = gen_rtx_VEC_MERGE (<MODE>mode, operands[1],
+                                operands[2 * <mask_applied>],
+                                operands[3 * <mask_applied>]);
+      emit_insn (gen_rtx_SET (VOIDmode, operands[0], src));
       DONE;
     }
 })
index 8a9c0cbaf0e5597b6680e3e5822de113021de8b8..267bcc0c7c603e9fd2f2b101247a3d8817fdbd69 100644 (file)
@@ -1,3 +1,8 @@
+2014-01-04  Jakub Jelinek  <jakub@redhat.com>
+
+       * gcc.target/i386/avx512f-vmovdqu32-1.c: Allow vmovdqu64 instead of
+       vmovdqu32.
+
 2014-01-04  Janus Weil  <janus@gcc.gnu.org>
 
        PR fortran/59547
index b8af781834e82fd2a2e26139491f82862d53944c..79dbf9dd37a0b55cd074972bf8dbd75318f15887 100644 (file)
@@ -1,6 +1,6 @@
 /* { dg-do compile } */
 /* { dg-options "-mavx512f -O2" } */
-/* { dg-final { scan-assembler-times "vmovdqu32\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu\[36\]\[24\]\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */
 /* { dg-final { scan-assembler-times "vmovdqu32\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */
 /* { dg-final { scan-assembler-times "vmovdqu32\[ \\t\]+\[^\n\]*\\)\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 1 } } */
 /* { dg-final { scan-assembler-times "vmovdqu32\[ \\t\]+\[^\n\]*%zmm\[0-9\]\[^\n\]*\\)\[^\{\]" 1 } } */