#ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE
#define vst4q(__addr, __value) __arm_vst4q(__addr, __value)
-#define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset)
-#define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p)
-#define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset)
-#define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p)
-#define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset)
-#define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p)
#define vuninitializedq(__v) __arm_vuninitializedq(__v)
#define vst2q(__addr, __value) __arm_vst2q(__addr, __value)
#define vld2q(__addr) __arm_vld2q(__addr)
#define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
-#define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
-#define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
-#define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
-#define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
-#define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
-#define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
-#define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
-#define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
#define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
#define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
#define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
-#define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
-#define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
-#define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
-#define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
-#define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
-#define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
#define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
#define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
-#define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
-#define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
-#define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
-#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
-#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
-#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
#define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
#define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
#define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
-}
-
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
}
-__extension__ extern __inline int64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
-{
- return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
-}
-
-__extension__ extern __inline uint64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
-{
- return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
-}
-
-__extension__ extern __inline int64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
-}
-
-__extension__ extern __inline uint64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
-}
-
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset)
__builtin_mve_vst4qv4sf (__addr, __rv.__o);
}
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf ((__builtin_neon_hi *) __base, __offset, __p);
-}
-
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset)
return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p);
}
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf ((__builtin_neon_si *) __base, __offset, __p);
-}
-
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
__arm_vst4q_u32 (__addr, __value);
}
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset)
-{
- return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset)
-{
- return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset)
-{
- return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset)
-{
- return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p);
-}
-
-__extension__ extern __inline int64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset)
-{
- return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset);
-}
-
-__extension__ extern __inline uint64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset)
-{
- return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset);
-}
-
-__extension__ extern __inline int64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p);
-}
-
-__extension__ extern __inline uint64x2_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset)
-{
- return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset)
-{
- return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p);
-}
-
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (int8_t * __addr, int8x16x2_t __value)
__arm_vst4q_f32 (__addr, __value);
}
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset)
-{
- return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset)
-{
- return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
-{
- return __arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p);
-}
-
__extension__ extern __inline float16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (float16_t const * __addr)
int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
-#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
-
-#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
- int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
-
-#define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
- _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
-
-#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
- _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1, p2), \
- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1, p2), \
- int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1, p2)))
-
#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
-#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
-
-#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
- int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
- int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
-
-#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1), \
- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
-
-#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
- int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, p2), \
- int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
-
#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
#endif /* MVE Integer. */
-#define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1), \
- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1)))
-
-#define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
- int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
- int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
-
#endif /* __cplusplus */
#endif /* __ARM_FEATURE_MVE */
#endif /* _GCC_ARM_MVE_H. */
[(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_base_<supf>v4si"))
(set_attr "length" "8")])
+;; Gather loads with shifted offset
+;;
+;; [vldrhq_gather_shifted_offset_s vldrhq_gather_shifted_offset_u]
+;; [vldrhq_gather_shifted_offset_f]
+;; [vldrwq_gather_shifted_offset_s vldrwq_gather_shifted_offset_u]
+;; [vldrwq_gather_shifted_offset_f]
+;; [vldrdq_gather_shifted_offset_s vldrdq_gather_shifted_offset_u]
+;;
+(define_insn "@mve_vldrq_gather_shifted_offset_<mode>"
+ [(set (match_operand:MVE_VLD_ST_scatter_shifted 0 "s_register_operand" "=&w")
+ (unspec:MVE_VLD_ST_scatter_shifted
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:<MVE_scatter_offset> 2 "s_register_operand" "w")
+ (mem:BLK (scratch))]
+ VLDRGSOQ))
+ ]
+ "(TARGET_HAVE_MVE && VALID_MVE_SI_MODE (<MODE>mode))
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (<MODE>mode))"
+ "vldr<MVE_elem_ch>.<MVE_u_elem>\t%q0, [%1, %q2, uxtw #<MVE_scatter_shift>]"
+ [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrq_gather_shifted_offset_<mode>"))
+ (set_attr "length" "4")])
+
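+;; Illustration (assumed expansion, not part of the pattern): with
+;; <MODE> = V4SI, assuming <MVE_elem_ch>, <MVE_u_elem> and <MVE_scatter_shift>
+;; expand to "w", "u32" and 2, the vldrwq_gather_shifted_offset_s32 intrinsic
+;; should match this pattern and assemble as
+;;   vldrw.u32   q0, [r0, q1, uxtw #2]
+;; (register allocation arbitrary), matching the output of the removed
+;; mve_vldrwq_gather_shifted_offset_<supf>v4si pattern.
+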
+;; Extending gather loads with shifted offset
;;
;; [vldrhq_gather_shifted_offset_s vldrhq_gather_shifted_offset_u]
;;
-(define_insn "mve_vldrhq_gather_shifted_offset_<supf><mode>"
- [(set (match_operand:MVE_5 0 "s_register_operand" "=&w")
- (unspec:MVE_5 [(match_operand:<MVE_H_ELEM> 1 "memory_operand" "Us")
- (match_operand:MVE_5 2 "s_register_operand" "w")]
- VLDRHGSOQ))
+(define_insn "@mve_vldrq_gather_shifted_offset_extend_v4si<US>"
+ [(set (match_operand:V4SI 0 "s_register_operand" "=&w")
+ (SE:V4SI
+ (unspec:V4HI
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:V4SI 2 "s_register_operand" "w")
+ (mem:BLK (scratch))]
+ VLDRGSOQ_EXT)))
]
"TARGET_HAVE_MVE"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- if (!strcmp ("<supf>","s") && <V_sz_elem> == 16)
- output_asm_insn ("vldrh.u16\t%q0, [%m1, %q2, uxtw #1]",ops);
- else
- output_asm_insn ("vldrh.<supf><V_sz_elem>\t%q0, [%m1, %q2, uxtw #1]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrhq_gather_shifted_offset_<supf><mode>"))
+ "vldrh.<US>32\t%q0, [%1, %q2, uxtw #1]"
+ [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrq_gather_shifted_offset_extend_v4si<US>"))
(set_attr "length" "4")])
+;; Predicated gather loads with shifted offset
;;
;; [vldrhq_gather_shifted_offset_z_s vldrhq_gather_shifted_offset_z_u]
+;; [vldrhq_gather_shifted_offset_z_f]
+;; [vldrwq_gather_shifted_offset_z_s vldrwq_gather_shifted_offset_z_u]
+;; [vldrwq_gather_shifted_offset_z_f]
+;; [vldrdq_gather_shifted_offset_z_s vldrdq_gather_shifted_offset_z_u]
;;
-(define_insn "mve_vldrhq_gather_shifted_offset_z_<supf><mode>"
- [(set (match_operand:MVE_5 0 "s_register_operand" "=&w")
- (unspec:MVE_5 [(match_operand:<MVE_H_ELEM> 1 "memory_operand" "Us")
- (match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:<MVE_VPRED> 3 "vpr_register_operand" "Up")
- ]VLDRHGSOQ))
+(define_insn "@mve_vldrq_gather_shifted_offset_z_<mode>"
+ [(set (match_operand:MVE_VLD_ST_scatter_shifted 0 "s_register_operand" "=&w")
+ (unspec:MVE_VLD_ST_scatter_shifted
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:<MVE_scatter_offset> 2 "s_register_operand" "w")
+ (match_operand:<MVE_VPRED> 3 "vpr_register_operand" "Up")
+ (mem:BLK (scratch))]
+ VLDRGSOQ_Z))
]
- "TARGET_HAVE_MVE"
-{
- rtx ops[4];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- ops[3] = operands[3];
- if (!strcmp ("<supf>","s") && <V_sz_elem> == 16)
- output_asm_insn ("vpst\n\tvldrht.u16\t%q0, [%m1, %q2, uxtw #1]",ops);
- else
- output_asm_insn ("vpst\n\tvldrht.<supf><V_sz_elem>\t%q0, [%m1, %q2, uxtw #1]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrhq_gather_shifted_offset_<supf><mode>"))
+ "(TARGET_HAVE_MVE && VALID_MVE_SI_MODE (<MODE>mode))
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (<MODE>mode))"
+ "vpst\n\tvldr<MVE_elem_ch>t.<MVE_u_elem>\t%q0, [%1, %q2, uxtw #<MVE_scatter_shift>]"
+ [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrq_gather_shifted_offset_<mode>"))
(set_attr "length" "8")])
+;; Predicated extending gather loads with shifted offset
+;;
+;; [vldrhq_gather_shifted_offset_z_s vldrhq_gather_shifted_offset_z_u]
+;;
+(define_insn "@mve_vldrq_gather_shifted_offset_z_extend_v4si<US>"
+ [(set (match_operand:V4SI 0 "s_register_operand" "=&w")
+ (SE:V4SI
+ (unspec:V4HI
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:V4SI 2 "s_register_operand" "w")
+ (match_operand:V4BI 3 "vpr_register_operand" "Up")
+ (mem:BLK (scratch))]
+ VLDRGSOQ_EXT_Z)))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\n\tvldrht.<US>32\t%q0, [%1, %q2, uxtw #1]"
+ [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrq_gather_shifted_offset_extend_v4si<US>"))
+ (set_attr "length" "4")])
+
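+;; Illustration (assumed expansion): with <US> = u this should emit
+;;   vpst
+;;   vldrht.u32  q0, [r0, q1, uxtw #1]
+;; (register allocation arbitrary), two 4-byte instructions, hence length "8".
+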
;;
;; [vldrdq_gather_base_s vldrdq_gather_base_u]
;;
[(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrdq_gather_base_<supf>v2di"))
(set_attr "length" "8")])
-;;
-;; [vldrdq_gather_shifted_offset_s vldrdq_gather_shifted_offset_u]
-;;
-(define_insn "mve_vldrdq_gather_shifted_offset_<supf>v2di"
- [(set (match_operand:V2DI 0 "s_register_operand" "=&w")
- (unspec:V2DI [(match_operand:V2DI 1 "memory_operand" "Us")
- (match_operand:V2DI 2 "s_register_operand" "w")]
- VLDRDGSOQ))
- ]
- "TARGET_HAVE_MVE"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- output_asm_insn ("vldrd.u64\t%q0, [%m1, %q2, uxtw #3]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrdq_gather_shifted_offset_<supf>v2di"))
- (set_attr "length" "4")])
-
-;;
-;; [vldrdq_gather_shifted_offset_z_s vldrdq_gather_shifted_offset_z_u]
-;;
-(define_insn "mve_vldrdq_gather_shifted_offset_z_<supf>v2di"
- [(set (match_operand:V2DI 0 "s_register_operand" "=&w")
- (unspec:V2DI [(match_operand:V2DI 1 "memory_operand" "Us")
- (match_operand:V2DI 2 "s_register_operand" "w")
- (match_operand:V2QI 3 "vpr_register_operand" "Up")]
- VLDRDGSOQ))
- ]
- "TARGET_HAVE_MVE"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- output_asm_insn ("vpst\n\tvldrdt.u64\t%q0, [%m1, %q2, uxtw #3]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrdq_gather_shifted_offset_<supf>v2di"))
- (set_attr "length" "8")])
-
-;;
-;; [vldrhq_gather_shifted_offset_f]
-;;
-(define_insn "mve_vldrhq_gather_shifted_offset_fv8hf"
- [(set (match_operand:V8HF 0 "s_register_operand" "=&w")
- (unspec:V8HF [(match_operand:V8HI 1 "memory_operand" "Us")
- (match_operand:V8HI 2 "s_register_operand" "w")]
- VLDRHQGSO_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- output_asm_insn ("vldrh.f16\t%q0, [%m1, %q2, uxtw #1]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrhq_gather_shifted_offset_fv8hf"))
- (set_attr "length" "4")])
-
-;;
-;; [vldrhq_gather_shifted_offset_z_f]
-;;
-(define_insn "mve_vldrhq_gather_shifted_offset_z_fv8hf"
- [(set (match_operand:V8HF 0 "s_register_operand" "=&w")
- (unspec:V8HF [(match_operand:V8HI 1 "memory_operand" "Us")
- (match_operand:V8HI 2 "s_register_operand" "w")
- (match_operand:V8BI 3 "vpr_register_operand" "Up")]
- VLDRHQGSO_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-{
- rtx ops[4];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- ops[3] = operands[3];
- output_asm_insn ("vpst\n\tvldrht.f16\t%q0, [%m1, %q2, uxtw #1]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrhq_gather_shifted_offset_fv8hf"))
- (set_attr "length" "8")])
-
;;
;; [vldrwq_gather_base_f]
;;
[(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_base_fv4sf"))
(set_attr "length" "8")])
-;;
-;; [vldrwq_gather_shifted_offset_f]
-;;
-(define_insn "mve_vldrwq_gather_shifted_offset_fv4sf"
- [(set (match_operand:V4SF 0 "s_register_operand" "=&w")
- (unspec:V4SF [(match_operand:V4SI 1 "memory_operand" "Us")
- (match_operand:V4SI 2 "s_register_operand" "w")]
- VLDRWQGSO_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- output_asm_insn ("vldrw.u32\t%q0, [%m1, %q2, uxtw #2]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_shifted_offset_fv4sf"))
- (set_attr "length" "4")])
-
-;;
-;; [vldrwq_gather_shifted_offset_s vldrwq_gather_shifted_offset_u]
-;;
-(define_insn "mve_vldrwq_gather_shifted_offset_<supf>v4si"
- [(set (match_operand:V4SI 0 "s_register_operand" "=&w")
- (unspec:V4SI [(match_operand:V4SI 1 "memory_operand" "Us")
- (match_operand:V4SI 2 "s_register_operand" "w")]
- VLDRWGSOQ))
- ]
- "TARGET_HAVE_MVE"
-{
- rtx ops[3];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- output_asm_insn ("vldrw.u32\t%q0, [%m1, %q2, uxtw #2]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_shifted_offset_<supf>v4si"))
- (set_attr "length" "4")])
-
-;;
-;; [vldrwq_gather_shifted_offset_z_f]
-;;
-(define_insn "mve_vldrwq_gather_shifted_offset_z_fv4sf"
- [(set (match_operand:V4SF 0 "s_register_operand" "=&w")
- (unspec:V4SF [(match_operand:V4SI 1 "memory_operand" "Us")
- (match_operand:V4SI 2 "s_register_operand" "w")
- (match_operand:V4BI 3 "vpr_register_operand" "Up")]
- VLDRWQGSO_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-{
- rtx ops[4];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- ops[3] = operands[3];
- output_asm_insn ("vpst\n\tvldrwt.u32\t%q0, [%m1, %q2, uxtw #2]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_shifted_offset_fv4sf"))
- (set_attr "length" "8")])
-
-;;
-;; [vldrwq_gather_shifted_offset_z_s vldrwq_gather_shifted_offset_z_u]
-;;
-(define_insn "mve_vldrwq_gather_shifted_offset_z_<supf>v4si"
- [(set (match_operand:V4SI 0 "s_register_operand" "=&w")
- (unspec:V4SI [(match_operand:V4SI 1 "memory_operand" "Us")
- (match_operand:V4SI 2 "s_register_operand" "w")
- (match_operand:V4BI 3 "vpr_register_operand" "Up")]
- VLDRWGSOQ))
- ]
- "TARGET_HAVE_MVE"
-{
- rtx ops[4];
- ops[0] = operands[0];
- ops[1] = operands[1];
- ops[2] = operands[2];
- ops[3] = operands[3];
- output_asm_insn ("vpst\n\tvldrwt.u32\t%q0, [%m1, %q2, uxtw #2]",ops);
- return "";
-}
- [(set (attr "mve_unpredicated_insn") (symbol_ref "CODE_FOR_mve_vldrwq_gather_shifted_offset_<supf>v4si"))
- (set_attr "length" "8")])
-
;; Vector scatter stores with shifted offset
;;
;; [vstrhq_scatter_shifted_offset_s vstrhq_scatter_shifted_offset_u]