#define MODE_d_f16 E_V4HFmode
#define MODE_d_f32 E_V2SFmode
#define MODE_d_f64 E_V1DFmode
+#define MODE_d_mf8 E_V8QImode
#define MODE_d_s8 E_V8QImode
#define MODE_d_s16 E_V4HImode
#define MODE_d_s32 E_V2SImode
#define MODE_q_f16 E_V8HFmode
#define MODE_q_f32 E_V4SFmode
#define MODE_q_f64 E_V2DFmode
+#define MODE_q_mf8 E_V16QImode
#define MODE_q_s8 E_V16QImode
#define MODE_q_s16 E_V8HImode
#define MODE_q_s32 E_V4SImode
#define QUAL_p16 qualifier_poly
#define QUAL_p64 qualifier_poly
#define QUAL_p128 qualifier_poly
+#define QUAL_mf8 qualifier_modal_float
#define LENGTH_d ""
#define LENGTH_q "q"
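For orientation, the three new mf8 defines slot into the existing per-element-type lookup tables: the VREINTERPRET*_BUILTIN macros (not shown in this hunk) token-paste an element-type suffix onto MODE_<len>_, QUAL_ and LENGTH_ to pick up the vector mode, the type qualifier and the "q" name suffix. Below is a minimal, self-contained sketch of that token-pasting pattern; EXAMPLE_ENTRY and the EX_* string stand-ins are invented for illustration and do not reproduce the real aarch64-builtins.cc macros (in particular, QUAL_s16 being qualifier_none is an assumption about the surrounding file).

/* Illustrative sketch only, not part of the patch.  */
#include <stdio.h>

/* String stand-ins for the enum values the real file uses; the names
   mirror the defines in the hunk above.  */
#define EX_MODE_q_mf8 "E_V16QImode"
#define EX_MODE_q_s16 "E_V8HImode"
#define EX_QUAL_mf8   "qualifier_modal_float"
#define EX_QUAL_s16   "qualifier_none"          /* assumption */
#define EX_LENGTH_q   "q"

/* Invented stand-in for the builtin-description macros: paste the
   length and element-type suffixes to look up name, modes and
   qualifiers.  */
#define EXAMPLE_ENTRY(A, B, L) \
  { "vreinterpret" EX_LENGTH_##L "_" #A "_" #B, \
    EX_MODE_##L##_##A, EX_MODE_##L##_##B, EX_QUAL_##A, EX_QUAL_##B }

struct example_entry
{
  const char *name, *ret_mode, *arg_mode, *ret_qual, *arg_qual;
};

int
main (void)
{
  struct example_entry e = EXAMPLE_ENTRY (mf8, s16, q);
  /* Prints: vreinterpretq_mf8_s16 E_V16QImode E_V8HImode
     qualifier_modal_float qualifier_none  */
  printf ("%s %s %s %s %s\n",
          e.name, e.ret_mode, e.arg_mode, e.ret_qual, e.arg_qual);
  return 0;
}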
/* vreinterpret intrinsics are defined for any pair of element types.
{ _bf16 } { _bf16 }
{ _f16 _f32 _f64 } { _f16 _f32 _f64 }
+ { _mf8 } { _mf8 }
{ _s8 _s16 _s32 _s64 } x { _s8 _s16 _s32 _s64 }
{ _u8 _u16 _u32 _u64 } { _u8 _u16 _u32 _u64 }
{ _p8 _p16 _p64 } { _p8 _p16 _p64 }. */
VREINTERPRET_BUILTIN2 (A, f16) \
VREINTERPRET_BUILTIN2 (A, f32) \
VREINTERPRET_BUILTIN2 (A, f64) \
+ VREINTERPRET_BUILTIN2 (A, mf8) \
VREINTERPRET_BUILTIN2 (A, s8) \
VREINTERPRET_BUILTIN2 (A, s16) \
VREINTERPRET_BUILTIN2 (A, s32) \
VREINTERPRET_BUILTINS1 (f16) \
VREINTERPRET_BUILTINS1 (f32) \
VREINTERPRET_BUILTINS1 (f64) \
+ VREINTERPRET_BUILTINS1 (mf8) \
VREINTERPRET_BUILTINS1 (s8) \
VREINTERPRET_BUILTINS1 (s16) \
VREINTERPRET_BUILTINS1 (s32) \
/* vreinterpretq intrinsics are additionally defined for p128.
{ _bf16 } { _bf16 }
{ _f16 _f32 _f64 } { _f16 _f32 _f64 }
+ { _mf8 } { _mf8 }
{ _s8 _s16 _s32 _s64 } x { _s8 _s16 _s32 _s64 }
{ _u8 _u16 _u32 _u64 } { _u8 _u16 _u32 _u64 }
{ _p8 _p16 _p64 _p128 } { _p8 _p16 _p64 _p128 }. */
VREINTERPRETQ_BUILTIN2 (A, f16) \
VREINTERPRETQ_BUILTIN2 (A, f32) \
VREINTERPRETQ_BUILTIN2 (A, f64) \
+ VREINTERPRETQ_BUILTIN2 (A, mf8) \
VREINTERPRETQ_BUILTIN2 (A, s8) \
VREINTERPRETQ_BUILTIN2 (A, s16) \
VREINTERPRETQ_BUILTIN2 (A, s32) \
VREINTERPRETQ_BUILTINS1 (f16) \
VREINTERPRETQ_BUILTINS1 (f32) \
VREINTERPRETQ_BUILTINS1 (f64) \
+ VREINTERPRETQ_BUILTINS1 (mf8) \
VREINTERPRETQ_BUILTINS1 (s8) \
VREINTERPRETQ_BUILTINS1 (s16) \
VREINTERPRETQ_BUILTINS1 (s32) \
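Taken together with the lists above, the new entries give every d- and q-register vreinterpret pairing involving _mf8 except a d-register _p128 form (p128 is q-only). A minimal user-level sketch, assuming a GCC with these changes and an arm_neon.h that exposes the mfloat8x8_t/mfloat8x16_t vector types:

/* Illustrative sketch only, not part of the patch: reinterprets
   change the element type, never the bits, so no conversion code is
   expected.  */
#include <arm_neon.h>

uint16x8_t
mf8_bits_as_u16 (mfloat8x16_t a)
{
  /* q-register form: 16 FP8 lanes viewed as 8 unsigned 16-bit lanes.  */
  return vreinterpretq_u16_mf8 (a);
}

mfloat8x8_t
u32_bits_as_mf8 (uint32x2_t a)
{
  /* d-register form: 2 unsigned 32-bit lanes viewed as 8 FP8 lanes.  */
  return vreinterpret_mf8_u32 (a);
}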
{
int i;
int nelts = ARRAY_SIZE (aarch64_simd_types);
- int q = qualifiers & (qualifier_poly | qualifier_unsigned);
+ int q = qualifiers
+ & (qualifier_poly | qualifier_unsigned | qualifier_modal_float);
for (i = 0; i < nelts; i++)
{
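The widened mask is what lets the type lookup tell the new type apart from its mode-mates: mfloat8x16_t, int8x16_t, uint8x16_t and poly8x16_t all use V16QImode, so only the qualifier bits distinguish them, and without qualifier_modal_float in the mask an mf8 operand could wrongly resolve to one of the other V16QImode types. A simplified, hypothetical illustration of that disambiguation (invented names and bit values, not the real aarch64-builtins.cc loop):

/* Illustrative sketch only; the real qualifier enum lives in the
   aarch64 backend.  */
enum example_qualifier
{
  example_qualifier_none        = 0,        /* int8x16_t     */
  example_qualifier_unsigned    = 1 << 0,   /* uint8x16_t    */
  example_qualifier_poly        = 1 << 1,   /* poly8x16_t    */
  example_qualifier_modal_float = 1 << 2    /* mfloat8x16_t  */
};

/* Reduce a type's qualifiers to the bits that matter when several
   types share one machine mode.  Dropping the modal-float bit from
   the mask would make mfloat8x16_t and int8x16_t compare equal.  */
static int
example_type_key (int qualifiers)
{
  return qualifiers & (example_qualifier_poly
                       | example_qualifier_unsigned
                       | example_qualifier_modal_float);
}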
--- /dev/null
+/* { dg-do compile { target { aarch64*-*-* } } } */
+
+#include <arm_neon.h>
+
+#define TEST_128(T, S) \
+T test_vreinterpretq_##S##_mf8 (mfloat8x16_t a) \
+{ \
+  return vreinterpretq_##S##_mf8 (a); \
+} \
+ \
+mfloat8x16_t test_vreinterpretq_mf8_##S (T a) \
+{ \
+  return vreinterpretq_mf8_##S (a); \
+}
+
+
+#define TEST_BOTH(T1, T2, S) \
+TEST_128(T2, S) \
+T1 test_vreinterpret_##S##_mf8 (mfloat8x8_t a) \
+{ \
+  return vreinterpret_##S##_mf8 (a); \
+} \
+ \
+mfloat8x8_t test_vreinterpret_mf8_##S (T1 a) \
+{ \
+  return vreinterpret_mf8_##S (a); \
+}
+
+TEST_BOTH(bfloat16x4_t, bfloat16x8_t, bf16)
+TEST_BOTH(float16x4_t, float16x8_t, f16)
+TEST_BOTH(float32x2_t, float32x4_t, f32)
+TEST_BOTH(float64x1_t, float64x2_t, f64)
+TEST_BOTH(poly8x8_t, poly8x16_t, p8)
+TEST_BOTH(poly16x4_t, poly16x8_t, p16)
+TEST_BOTH(poly64x1_t, poly64x2_t, p64)
+TEST_128(poly128_t, p128)
+TEST_BOTH(int8x8_t, int8x16_t, s8)
+TEST_BOTH(int16x4_t, int16x8_t, s16)
+TEST_BOTH(int32x2_t, int32x4_t, s32)
+TEST_BOTH(int64x1_t, int64x2_t, s64)
+TEST_BOTH(uint8x8_t, uint8x16_t, u8)
+TEST_BOTH(uint16x4_t, uint16x8_t, u16)
+TEST_BOTH(uint32x2_t, uint32x4_t, u32)
+TEST_BOTH(uint64x1_t, uint64x2_t, u64)