}
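/* Set lane imm of the vector to val, leaving the other lanes intact.
 * Note that NEON's vsetq_lane_u32 takes (scalar, vector, lane), in that
 * order. */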
static really_inline m128 insert32_m128(m128 in, u32 val, const int imm) {
- return vsetq_lane_u32((uint32x4_t)in, val, imm);
+ return (m128) vsetq_lane_u32(val, (uint32x4_t)in, imm);
}
static really_inline u32 movd(const m128 in) {
    return vgetq_lane_u32((uint32x4_t) in, 0); /* extract the low 32-bit lane */
}
-/* another form of movq */
-static really_inline
-m128 load_m128_from_u64a(const u64a *p) {
- return (m128) vsetq_lane_u64(*p, (uint64x2_t) zeroes128(), 0);
-}
-
static really_inline u32 extract32from128(const m128 in, unsigned imm) {
#if defined(HAVE__BUILTIN_CONSTANT_P)
if (__builtin_constant_p(imm)) {
        return vgetq_lane_u32((uint32x4_t) in, imm); /* imm is a compile-time constant here */
}
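+/* Zero-extend u32 lanes to u64 lanes: vget_low_u32 / vget_high_u32 select
+ * lanes 0-1 / 2-3 of x, and vmovl_u32 widens each 32-bit lane to 64 bits. */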
+static really_inline
+m128 widenlo128(m128 x) {
+ return (m128) vmovl_u32(vget_low_u32((uint32x4_t)x));
+}
+
+static really_inline
+m128 widenhi128(m128 x) {
+ return (m128) vmovl_u32(vget_high_u32((uint32x4_t)x));
+}
+
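+/* Example: if x holds the u32 lanes {1, 2, 3, 4}, widenlo128(x) yields the
+ * u64 lanes {1, 2} and widenhi128(x) yields the u64 lanes {3, 4}. */
+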
#endif // ARCH_ARM_SIMD_UTILS_H