the vec_dupv4hi pattern.
NB: SSE2 can load 16-bit data into an SSE register via pinsrw.  */
int mov_size = MAYBE_SSE_CLASS_P (regclass) && TARGET_SSE2 ? 2 : 4;
- if (GET_MODE_SIZE (from) < mov_size)
+ if (GET_MODE_SIZE (from) < mov_size
+ || GET_MODE_SIZE (to) < mov_size)
return false;
}
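For illustration only (not part of the patch; -msse2 assumed, function names ours): the 16-bit GPR<->SSE transfers behind the new 2-byte threshold are exactly what the SSE2 pinsrw/pextrw intrinsics expose:

    #include <emmintrin.h>

    /* pinsrw: insert a 16-bit value from a GPR into lane 0 of an XMM register.  */
    __m128i
    gpr_to_xmm16 (short x)
    {
      return _mm_insert_epi16 (_mm_setzero_si128 (), x, 0);
    }

    /* pextrw: extract lane 0 of an XMM register into a GPR (zero-extended).  */
    short
    xmm16_to_gpr (__m128i v)
    {
      return (short) _mm_extract_epi16 (v, 0);
    }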
|| VALID_AVX512F_SCALAR_MODE (mode)))
return true;
- /* For AVX512FP16, vmovw supports movement of HImode
- and HFmode between GPR and SSE registers. */
- if (TARGET_AVX512FP16
- && VALID_AVX512FP16_SCALAR_MODE (mode))
- return true;
-
/* For AVX-5124FMAPS or AVX-5124VNNIW
allow V64SF and V64SI modes for special regnos. */
if ((TARGET_AVX5124FMAPS || TARGET_AVX5124VNNIW)
if (EXT_REX_SSE_REGNO_P (regno))
return false;
+  /* Use pinsrw/pextrw to move 16-bit data between SSE and general-purpose registers.  */
+ if (TARGET_SSE2 && mode == HImode)
+ return true;
+
/* OImode and AVX modes are available only when AVX is enabled. */
return ((TARGET_AVX
&& VALID_AVX256_REG_OR_OI_MODE (mode))
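A sketch of the intended effect of the new HImode case (our example, assuming -O2 -msse2): a 16-bit lane extract can now move straight from an SSE register to a GPR via pextrw instead of going through the stack:

    typedef short v8hi __attribute__ ((vector_size (16)));

    short
    lane3 (v8hi v)
    {
      return v[3];  /* expected: pextrw $3, %xmm0, %eax  */
    }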
#define VALID_AVX256_REG_OR_OI_MODE(MODE) \
  (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode)
#define VALID_AVX512F_SCALAR_MODE(MODE) \
- ((MODE) == DImode || (MODE) == DFmode || (MODE) == SImode \
- || (MODE) == SFmode)
-
-#define VALID_AVX512FP16_SCALAR_MODE(MODE) \
- ((MODE) == HImode || (MODE) == HFmode)
+ ((MODE) == DImode || (MODE) == DFmode \
+ || (MODE) == SImode || (MODE) == SFmode \
+ || (MODE) == HImode || (MODE) == HFmode || (MODE) == BFmode)
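A rough illustration of what the widened scalar-mode macro enables (our example; assumes -mavx512fp16): bit-casting a 16-bit integer to _Float16 should need only a single GPR-to-XMM move, typically vmovw:

    _Float16
    bits_to_hf (unsigned short bits)
    {
      _Float16 hf;
      __builtin_memcpy (&hf, &bits, sizeof (hf));  /* expected: vmovw %edi, %xmm0  */
      return hf;
    }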
#define VALID_AVX512F_REG_MODE(MODE) \
  ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \
   || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \
   || (MODE) == V4TImode || (MODE) == V32HFmode || (MODE) == V32BFmode)
#define VALID_SSE2_REG_MODE(MODE) \
  ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
|| (MODE) == V8HFmode || (MODE) == V4HFmode || (MODE) == V2HFmode \
|| (MODE) == V8BFmode || (MODE) == V4BFmode || (MODE) == V2BFmode \
|| (MODE) == V4QImode || (MODE) == V2HImode || (MODE) == V1SImode \
- || (MODE) == V2DImode || (MODE) == V2QImode || (MODE) == DFmode \
+ || (MODE) == V2DImode || (MODE) == V2QImode \
+ || (MODE) == DFmode || (MODE) == DImode \
|| (MODE) == HFmode || (MODE) == BFmode)
#define VALID_SSE_REG_MODE(MODE) \
((MODE) == V1TImode || (MODE) == TImode \
|| (MODE) == V4SFmode || (MODE) == V4SImode \
- || (MODE) == SFmode || (MODE) == TFmode || (MODE) == TDmode)
+ || (MODE) == SFmode || (MODE) == SImode \
+ || (MODE) == TFmode || (MODE) == TDmode)
#define VALID_MMX_REG_MODE_3DNOW(MODE) \
((MODE) == V2SFmode || (MODE) == SFmode)
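Likewise for the SImode addition to VALID_SSE_REG_MODE, a sketch (our example; assumes -msse2) of the 32-bit XMM-to-GPR move, movd, that lets SImode values live in SSE registers:

    #include <emmintrin.h>

    int
    xmm_to_gpr32 (__m128i v)
    {
      return _mm_cvtsi128_si32 (v);  /* movd %xmm0, %eax  */
    }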
/* { dg-final { scan-assembler-times "vinserti64x4\[ \\t\]+\[^\n\]*%zmm" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu64\[ \\t\]+\[^\n\]*%zmm" 1 } } */
/* No need to dynamically realign the stack here. */
-/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" } } */
/* Nor use a frame pointer. */
-/* { dg-final { scan-assembler-not "%\[re\]bp" { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-not "%\[re\]bp" { xfail ia32 } } } */
/* { dg-final { scan-assembler-times "vpbroadcastb\[ \\t\]+\[^\n\]*%ymm" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu\[ \\t\]+\[^\n\]*%ymm" 2 } } */
/* No need to dynamically realign the stack here. */
-/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" } } */
/* Nor use a frame pointer. */
/* { dg-final { scan-assembler-not "%\[re\]bp" { xfail *-*-* } } } */
/* { dg-final { scan-assembler-not "vinserti64x4" } } */
/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\n\]*%zmm" 1 } } */
/* No need to dynamically realign the stack here. */
-/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-not "and\[^\n\r]*%\[re\]sp" } } */
/* Nor use a frame pointer. */
/* { dg-final { scan-assembler-not "%\[re\]bp" { xfail *-*-* } } } */