#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
-static really_inline m128 set16x8(u8 c) {
+static really_inline m128 set1_16x8(u8 c) {
return _mm_set1_epi8(c);
}
-static really_inline m128 set4x32(u32 c) {
+static really_inline m128 set1_4x32(u32 c) {
return _mm_set1_epi32(c);
}
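+// broadcast a single 64-bit value to both 64-bit lanes of an m128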
+static really_inline m128 set1_2x64(u64a c) {
+ return _mm_set1_epi64x(c);
+}
+
static really_inline u32 movd(const m128 in) {
return _mm_cvtsi128_si32(in);
}
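+// build an m128 from four 32-bit values; x3 lands in the most significant lane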
static really_inline
-m128 set64x2(u64a hi, u64a lo) {
+m128 set4x32(u32 x3, u32 x2, u32 x1, u32 x0) {
+ return _mm_set_epi32(x3, x2, x1, x0);
+}
+
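+// build an m128 from two 64-bit values; hi is the most significant half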
+static really_inline
+m128 set2x64(u64a hi, u64a lo) {
return _mm_set_epi64x(hi, lo);
}
#define rshift64_m256(a, b) _mm256_srli_epi64((a), (b))
-static really_inline
-m256 set32x8(u32 in) {
- return _mm256_set1_epi8(in);
+static really_inline m256 set1_4x64(u64a c) {
+ return _mm256_set1_epi64x(c);
}
#define eq256(a, b) _mm256_cmpeq_epi8((a), (b))
#define movemask256(a) ((u32)_mm256_movemask_epi8((a)))
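+// broadcast an m128 into both 128-bit halves of an m256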
static really_inline
-m256 set2x128(m128 a) {
+m256 set1_2x128(m128 a) {
return _mm256_broadcastsi128_si256(a);
}
rv.hi = rshift64_m128(rv.hi, b);
return rv;
}
-static really_inline
-m256 set32x8(u32 in) {
- m256 rv;
- rv.lo = set16x8((u8) in);
- rv.hi = rv.lo;
- return rv;
-}
static really_inline
m256 eq256(m256 a, m256 b) {
}
static really_inline
-m256 set2x128(m128 a) {
+m256 set1_2x128(m128 a) {
m256 rv = {a, a};
return rv;
}
// aligned load of 128-bit value to low and high part of 256-bit value
static really_inline m256 load2x128(const void *ptr) {
#if defined(HAVE_AVX2)
- return set2x128(load128(ptr));
+ return set1_2x128(load128(ptr));
#else
assert(ISALIGNED_N(ptr, alignof(m128)));
m256 rv;
}
static really_inline m256 loadu2x128(const void *ptr) {
- return set2x128(loadu128(ptr));
+ return set1_2x128(loadu128(ptr));
}
// aligned store
}
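+// broadcast an 8-bit value to all 32 bytes of an m256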
static really_inline
-m256 set64x4(u64a hi_1, u64a hi_0, u64a lo_1, u64a lo_0) {
+m256 set1_32x8(u32 in) {
+#if defined(HAVE_AVX2)
+ return _mm256_set1_epi8(in);
+#else
+ m256 rv;
+ rv.hi = set1_16x8((u8)in);
+ rv.lo = set1_16x8((u8)in);
+ return rv;
+#endif
+}
+
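+// build an m256 from eight 32-bit values; hi_3 is the most significant lane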
+static really_inline
+m256 set8x32(u32 hi_3, u32 hi_2, u32 hi_1, u32 hi_0,
+             u32 lo_3, u32 lo_2, u32 lo_1, u32 lo_0) {
+#if defined(HAVE_AVX2)
+ return _mm256_set_epi32(hi_3, hi_2, hi_1, hi_0, lo_3, lo_2, lo_1, lo_0);
+#else
+ m256 rv;
+ rv.hi = set4x32(hi_3, hi_2, hi_1, hi_0);
+ rv.lo = set4x32(lo_3, lo_2, lo_1, lo_0);
+ return rv;
+#endif
+}
+
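+// build an m256 from four 64-bit values; hi_1 is the most significant lane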
+static really_inline
+m256 set4x64(u64a hi_1, u64a hi_0, u64a lo_1, u64a lo_0) {
#if defined(HAVE_AVX2)
return _mm256_set_epi64x(hi_1, hi_0, lo_1, lo_0);
#else
m256 rv;
- rv.hi = set64x2(hi_1, hi_0);
- rv.lo = set64x2(lo_1, lo_0);
+ rv.hi = set2x64(hi_1, hi_0);
+ rv.lo = set2x64(lo_1, lo_0);
return rv;
#endif
}
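+// Usage sketch (illustrative only, not part of the header): arguments run
+// from the most significant lane down, e.g.
+//   m256 lanes = set4x64(3, 2, 1, 0);  // lowest 64-bit lane holds 0
+//   m256 ones  = set1_4x64(1);         // broadcast 1 to all four lanes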
#if defined(HAVE_AVX512)
static really_inline
-m512 set64x8(u8 a) {
+m512 set1_64x8(u8 a) {
return _mm512_set1_epi8(a);
}
static really_inline
-m512 set8x64(u64a a) {
+m512 set1_8x64(u64a a) {
return _mm512_set1_epi64(a);
}
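+// build an m512 from eight 64-bit values; hi_3 is the most significant lane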
static really_inline
-m512 set512_64(u64a hi_3, u64a hi_2, u64a hi_1, u64a hi_0,
+m512 set8x64(u64a hi_3, u64a hi_2, u64a hi_1, u64a hi_0,
u64a lo_3, u64a lo_2, u64a lo_1, u64a lo_0) {
return _mm512_set_epi64(hi_3, hi_2, hi_1, hi_0,
lo_3, lo_2, lo_1, lo_0);
}
static really_inline
-m512 set4x128(m128 a) {
+m512 set1_4x128(m128 a) {
return _mm512_broadcast_i32x4(a);
}
#endif