/*
* Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2020-2021, VectorCamp PC
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
typedef __vector uint8_t uint8x16_t;
typedef __vector int8_t int8x16_t;
+
static really_inline m128 ones128(void) {
- // the value in function must be a signed literal in range -16 to 15
return (m128) vec_splat_s8(-1);
}
* mask indicating which 32-bit words contain differences.
*/
static really_inline u32 diffrich128(m128 a, m128 b) {
- static const m128 movemask = { 1, 2, 4, 8 };
- m128 mask = (m128) vec_cmpeq(a, b);
- mask = vec_and(vec_xor(mask, mask), movemask);
- m128 sum = vec_sums(mask, zeroes128());
- sum = vec_sld(zeroes128(), sum, 4);
- s32 ALIGN_ATTR(16) x;
- vec_ste(sum, 0, &x);
- return x;
+ static const m128 movemask = { 1, 2, 4, 8 };
+ m128 mask = (m128) vec_cmpeq(a, b); // _mm_cmpeq_epi32 (a, b);
+ mask = vec_and(not128(mask), movemask);
+ m128 sum = vec_sums(mask, zeroes128());
+ return sum[3];
}
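+/* Example: with a = set1_4x32(5) and b = zeroes128() all four 32-bit words
+ * differ, so diffrich128(a, b) == 0xf. */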
/**
static really_inline u32 diffrich64_128(m128 a, m128 b) {
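+    /* Like diffrich128 but per 64-bit word: bit 0 reports a difference in
+     * word 0 and bit 2 in word 1, matching the x86 bit layout. */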
static const uint64x2_t movemask = { 1, 4 };
uint64x2_t mask = (uint64x2_t) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b);
- mask = vec_and(vec_xor(mask, mask), movemask);
+ mask = (uint64x2_t) vec_and((uint64x2_t)not128((m128)mask), movemask);
m128 sum = vec_sums((m128)mask, zeroes128());
- sum = vec_sld(zeroes128(), sum, 4);
- s32 ALIGN_ATTR(16) x;
- vec_ste(sum, 0, &x);
- return x;
+ return sum[3];
}
static really_really_inline
}
static really_really_inline
-m128 lshift_m128(m128 a, unsigned UNUSED b) {
- // #warning FIXME
- // b must be 4 bit literal
- return (m128) vec_sld(a, zeroes128(), 0);
+m128 lshift_m128(m128 a, unsigned b) {
+ switch(b){
+ case 1: return vec_sld(a, zeroes128(), 1); break;
+ case 2: return vec_sld(a, zeroes128(), 2); break;
+ case 3: return vec_sld(a, zeroes128(), 3); break;
+ case 4: return vec_sld(a, zeroes128(), 4); break;
+ case 5: return vec_sld(a, zeroes128(), 5); break;
+ case 6: return vec_sld(a, zeroes128(), 6); break;
+ case 7: return vec_sld(a, zeroes128(), 7); break;
+ case 8: return vec_sld(a, zeroes128(), 8); break;
+ case 9: return vec_sld(a, zeroes128(), 9); break;
+ case 10: return vec_sld(a, zeroes128(), 10); break;
+ case 11: return vec_sld(a, zeroes128(), 11); break;
+ case 12: return vec_sld(a, zeroes128(), 12); break;
+ case 13: return vec_sld(a, zeroes128(), 13); break;
+ case 14: return vec_sld(a, zeroes128(), 14); break;
+ case 15: return vec_sld(a, zeroes128(), 15); break;
+ }
+ return a;
}
static really_really_inline
-m128 rshift_m128(m128 a, unsigned UNUSED b) {
- // #warning FIXME
- // b must be 4 bit literal
- return (m128) vec_sld(zeroes128(), a, 0 - 0);
+m128 rshift_m128(m128 a, unsigned b) {
+ switch(b){
+ case 1: return vec_sld(zeroes128(), a, 15); break;
+ case 2: return vec_sld(zeroes128(), a, 14); break;
+ case 3: return vec_sld(zeroes128(), a, 13); break;
+ case 4: return vec_sld(zeroes128(), a, 12); break;
+ case 5: return vec_sld(zeroes128(), a, 11); break;
+ case 6: return vec_sld(zeroes128(), a, 10); break;
+ case 7: return vec_sld(zeroes128(), a, 9); break;
+ case 8: return vec_sld(zeroes128(), a, 8); break;
+ case 9: return vec_sld(zeroes128(), a, 7); break;
+ case 10: return vec_sld(zeroes128(), a, 6); break;
+ case 11: return vec_sld(zeroes128(), a, 5); break;
+ case 12: return vec_sld(zeroes128(), a, 4); break;
+ case 13: return vec_sld(zeroes128(), a, 3); break;
+ case 14: return vec_sld(zeroes128(), a, 2); break;
+ case 15: return vec_sld(zeroes128(), a, 1); break;
+ }
+ return a;
}
static really_really_inline
-m128 lshift64_m128(m128 a, unsigned UNUSED b) {
- // #warnint FIXME
- // b must be 4 bit literal
- return (m128) vec_sld(zeroes128(), a, 0);
-
+m128 lshift64_m128(m128 a, unsigned b) {
+ uint64x2_t shift_indices = vec_splats((uint64_t)b);
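+    // Note: vec_sl interprets shift counts modulo 64, so b is assumed < 64
+    // here; x86 _mm_slli_epi64 would return zero for larger shifts.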
+ return (m128) vec_sl((int64x2_t)a, shift_indices);
}
static really_really_inline
-m128 rshift64_m128(m128 a, unsigned UNUSED b) {
- // warnint FIXME
- // b must be 4 bit literal
- return (m128) vec_sld(zeroes128(), a, 0);
+m128 rshift64_m128(m128 a, unsigned b) {
+ uint64x2_t shift_indices = vec_splats((uint64_t)b);
+ return (m128) vec_sr((int64x2_t)a, shift_indices);
}
static really_inline m128 eq128(m128 a, m128 b) {
}
static really_inline m128 eq64_m128(m128 a, m128 b) {
- return (m128) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b);
+ return (m128) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b);
}
static really_inline u32 movemask128(m128 a) {
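+    /* No direct vmovmskb equivalent on VSX: shift the MSB of each byte down
+     * to bit 0 of its lane, then repeatedly OR pairs of lanes together while
+     * widening (u8 -> u16 -> u32 -> u64), and finally splice the result
+     * bytes of the two u64 lanes into one 16-bit mask. */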
- //static const uint8x16_t powers = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
-
- // Compute the mask from the input
- //uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)a, powers))));
- //uint64x2_t mask1 = (m128)vextq_s8(mask, zeroes128(), 7);
- //mask = vorrq_u8(mask, mask1);
-
- // Get the resulting bytes
- //uint16_t output;
- //vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
- //return output;
- // #warning FIXME
- return !!diff128(a, zeroes128());
+ uint8x16_t s1 = vec_sr((uint8x16_t)a, vec_splat_u8(7));
+ uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7));
+ uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff));
+ uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and);
+
+ uint32x4_t ss2 = vec_sr((uint32x4_t)s2, vec_splat_u32(14));
+ uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff));
+ uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2);
+
+ uint64x2_t ss3 = vec_sr((uint64x2_t)s3, vec_splats((uint64_t)28)); // splat 28 into both u64 lanes
+ uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff));
+ uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3);
+
+ uint64x2_t ss4 = vec_sld((uint64x2_t)vec_splats(0), s4, 9);
+ uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff)); // AND the unshifted s4, not ss4, to keep mask bits 0..7
+ uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4);
+
+ return s5[0];
}
-static really_inline m128 set1_16x8(u8 UNUSED c) {
- // warning FIXME
- // c must be 5 bit literal
- // a solution is to use vec_splats
- //return (m128) vec_splat_u8(0);
+static really_inline m128 set1_16x8(u8 c) {
return (m128) vec_splats(c);
}
-static really_inline m128 set1_4x32(u32 UNUSED c) {
- // warning FIXME
- // c must be 5 bit literal
- // a solution is to use vec_splats
- // return (m128) vec_splat_u32(0);
+static really_inline m128 set1_4x32(u32 c) {
return (m128) vec_splats(c);
}
}
static really_inline u64a movq(const m128 in) {
- //return vgetq_lane_u64((uint64x2_t) in, 0);
- return !!diff128(in, zeroes128());
- // #warning FIXME
+ u64a ALIGN_ATTR(16) a[2];
+ vec_xst((uint64x2_t) in, 0, (uint64_t*)a);
+ return a[0];
}
/* another form of movq */
static really_inline
m128 load_m128_from_u64a(const u64a *p) {
- return (m128) vec_ld(0,p);
+    // Load just the 64 bits at p (cf. x86 _mm_set_epi64x(0, *p)); vec_ld
+    // would read a full 16 bytes and require 16-byte alignment.
+    uint64x2_t v = { *p, 0 };
+    return (m128) v;
}
return vec_any_ne(in,lshift_m128(in,imm));
}
-static really_inline u64a extract64from128(const m128 in, unsigned imm) {
-/*
+static really_inline u64a extract64from128(const m128 in, unsigned imm) {
+/*
#if defined(HS_OPTIMIZE)
return vgetq_lane_u64((uint64x2_t) in, imm);
#else
break;
}
#endif
-*/
-// #warning FIXME
-return vec_any_ne(in,lshift_m128(in,imm));
+*/
+    u64a ALIGN_ATTR(16) a[2];
+    vec_xst((uint64x2_t) in, 0, (uint64_t*)a);
+    switch (imm) {
+    case 0: return a[0];
+    case 1: return a[1];
+    default: return 0;
+    }
}
static really_inline m128 low64from128(const m128 in) {
- //return vcombine_u64(vget_low_u64(in), vdup_n_u64(0));
- // #warning FIXME
- return in;
+    // Keep the low 64-bit lane and zero the high one, matching the NEON
+    // vcombine(vget_low_u64(in), 0) semantics.
+    uint64x2_t v = (uint64x2_t) in;
+    v[1] = 0;
+    return (m128) v;
}
static really_inline m128 high64from128(const m128 in) {
- //return vcombine_u64(vget_high_u64(in), vdup_n_u64(0));
- // #warning FIXME
- return in;
+    // Move the high 64-bit lane down to lane 0 and zero the high lane,
+    // matching the NEON vcombine(vget_high_u64(in), 0) semantics.
+    uint64x2_t v = (uint64x2_t) in;
+    v[0] = v[1];
+    v[1] = 0;
+    return (m128) v;
}
}
static really_inline m128 andnot128(m128 a, m128 b) {
- m128 and_res = and128(a,b);
- return (m128) not128(and_res);
- // or
- //return (m128) not128(and128(a,b));
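+ // Matches Intel _mm_andnot_si128 semantics: (~a) & b.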
+ return (m128) and128(not128(a), b);
}
// aligned load
static really_inline m128 load128(const void *ptr) {
assert(ISALIGNED_N(ptr, alignof(m128)));
- //return (m128) vec_ld(0, ptr);
- // #warning FIXME
- return zeroes128();
+ return (m128) vec_xl(0, (const int32_t*)ptr);
}
// aligned store
-static really_inline void store128(void *ptr, m128 UNUSED a) {
+static really_inline void store128(void *ptr, m128 a) {
assert(ISALIGNED_N(ptr, alignof(m128)));
- //vec_st(a, 0, ptr);
- // warning FIXME
+ vec_st(a, 0, (int32_t*)ptr);
}
// unaligned load
-static really_inline m128 loadu128(const void UNUSED *ptr) {
- //return (m128) vec_ld(0, ptr);
- // #warning FIXME
- return zeroes128();
+static really_inline m128 loadu128(const void *ptr) {
+ return (m128) vec_xl(0, (const int64_t*)ptr);
}
// unaligned store
-static really_inline void storeu128(void UNUSED *ptr, m128 UNUSED a) {
- // #warning FIXME
+static really_inline void storeu128(void *ptr, m128 a) {
+ vec_xst(a, 0, (int32_t*)ptr); // unaligned store: vec_st would require 16-byte alignment
}
// packed unaligned store of first N bytes
}
-//#define CASE_ALIGN_VECTORS(a, b, offset) case offset: return (m128)vextq_s8((int8x16_t)(a), (int8x16_t)(b), (offset)); break;
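+// Emulates SSSE3 palignr via vec_sld: on little-endian the result is the 16
+// bytes starting `offset` bytes into the concatenation (a = low half,
+// b = high half), i.e. _mm_alignr_epi8(b, a, offset).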
+#define CASE_ALIGN_VECTORS(a, b, offset) case offset: return (m128)vec_sld((int8x16_t)(b), (int8x16_t)(a), (16 - offset)); break;
static really_really_inline
m128 palignr_imm(m128 r, m128 l, int offset) {
- /*
switch (offset) {
case 0: return l; break;
CASE_ALIGN_VECTORS(l, r, 1);
CASE_ALIGN_VECTORS(l, r, 14);
CASE_ALIGN_VECTORS(l, r, 15);
case 16: return r; break;
- default:
- return zeroes128();
- break;
- }
- */
- // #warning FIXME
- return (m128) vec_cmpeq(r,lshift_m128(l,offset));
+ default: return zeroes128(); break;
+ }
}
static really_really_inline
m128 palignr(m128 r, m128 l, int offset) {
-/*
-#if defined(HS_OPTIMIZE)
-    return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset);
-#else
-    return palignr_imm(r, l, offset);
-#endif
-*/
-// #warning FIXME
-return (m128) vec_cmpeq(r, lshift_m128(l,offset));
+    // vec_sld needs a compile-time literal shift count, so defer to the
+    // switch in palignr_imm; constant offsets still fold to a single vsldoi
+    // when the function is inlined.
+    return palignr_imm(r, l, offset);
}
#undef CASE_ALIGN_VECTORS
static really_really_inline
m128 rshiftbyte_m128(m128 a, unsigned b) {
- // #warning FIXME
- // return vec_sro(a,b);
return rshift_m128(a,b);
}
static really_really_inline
m128 lshiftbyte_m128(m128 a, unsigned b) {
- //#warning FIXME
- //return vec_slo(a,b);
- return lshift_m128(a,b);
+ return lshift_m128(a,b);
}
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
-/*
assert(amount >= -16 && amount <= 16);
- static const uint8x16_t vbs_mask = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
- const uint8x16_t outside_mask = set1_16x8(0xf0);
-
- m128 shift_mask = palignr_imm(vbs_mask, outside_mask, 16 - amount);
- return vqtbl1q_s8(in, shift_mask);
-*/
-// #warning FIXME
-return lshift_m128(in,amount);
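+ /* Byte-wise shift with the sign of amount choosing the direction, e.g.
+  *   variable_byte_shift_m128(in, 2)  == lshiftbyte_m128(in, 2)
+  *   variable_byte_shift_m128(in, -2) == rshiftbyte_m128(in, 2)
+  */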
+    if (amount < 0) {
+        return palignr_imm(zeroes128(), in, -amount);
+    } else {
+        return palignr_imm(in, zeroes128(), 16 - amount);
+    }
}
#ifdef __cplusplus
static really_inline
m128 pshufb_m128(m128 a, m128 b) {
- /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf.
- In NEON, if >=16, then the result is zero, otherwise it is that lane.
- btranslated is the version that is converted from Intel to NEON. */
- //int8x16_t btranslated = vandq_s8((int8x16_t)b,vdupq_n_s8(0x8f));
- //return (m128)vqtbl1q_s8((int8x16_t)a, (uint8x16_t)btranslated);
- // #warning FIXME
- return (m128) vec_max((int8x16_t)a, (int8x16_t)b);
+    /* On Intel, an index byte with bit 0x80 set zeroes that lane; otherwise
+     * byte (index & 0xf) of a is picked. vec_perm only honours the low bits
+     * of each index (and vec_permxor with a zero table never zeroes a lane),
+     * so emulate the 0x80-zeroing with an explicit compare-and-clear. */
+    uint8x16_t mask = (uint8x16_t) vec_cmpge((uint8x16_t)b, vec_splats((uint8_t)0x80));
+    uint8x16_t res = vec_perm((uint8x16_t)a, (uint8x16_t)a, (uint8x16_t)b);
+    return (m128) vec_andc(res, mask);
}
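+/* Example: pshufb_m128(a, set1_16x8(0)) broadcasts byte 0 of a across all
+ * lanes; an index byte of 0x80 yields zero in that lane, as with SSSE3
+ * pshufb. */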
static really_inline
m128 max_u8_m128(m128 a, m128 b) {
- return (m128) vec_max((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_max((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 min_u8_m128(m128 a, m128 b) {
- return (m128) vec_min((int8x16_t)a, (int8x16_t)b);
+ return (m128) vec_min((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
m128 sadd_u8_m128(m128 a, m128 b) {
- return (m128) vec_add((uint8x16_t)a, (uint8x16_t)b);
+ return (m128) vec_adds((uint8x16_t)a, (uint8x16_t)b);
}
static really_inline
}
static really_inline
-m128 set4x32(u32 UNUSED x3, u32 UNUSED x2, u32 UNUSED x1, u32 UNUSED x0) {
- //uint32_t ALIGN_ATTR(16) data[4] = { x0, x1, x2, x3 };
- //return (m128) vec_splat_u32(data);
- // #warning FIXME
- return zeroes128();
+m128 set4x32(u32 x3, u32 x2, u32 x1, u32 x0) {
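+    // Argument order mirrors _mm_set_epi32: x0 lands in lane 0 (the low
+    // 32-bit word) and x3 in lane 3.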
+ uint32x4_t v = { x0, x1, x2, x3 };
+ return (m128) v;
}
static really_inline
-m128 set2x64(u64a UNUSED hi, u64a UNUSED lo) {
- //uint64_t ALIGN_ATTR(16) data[2] = { lo, hi };
- //return (m128) vec_splats(data);
- // #warning FIXME
- return zeroes128();
+m128 set2x64(u64a hi, u64a lo) {
+ uint64x2_t v = { lo, hi };
+ return (m128) v;
}
#endif // ARCH_PPC64EL_SIMD_UTILS_H
#include "util/supervector/supervector.hpp"
#include <iostream>
+
+typedef __vector uint64_t uint64x2_t;
+typedef __vector int64_t int64x2_t;
+typedef __vector uint32_t uint32x4_t;
+typedef __vector int32_t int32x4_t;
+typedef __vector uint16_t uint16x8_t;
+typedef __vector int16_t int16x8_t;
+typedef __vector uint8_t uint8x16_t;
+typedef __vector int8_t int8x16_t;
+
// 128-bit Powerpc64le implementation
+union Tmp
+{
+ uint32_t u32;
+ uint16_t u16[2];
+};
+
template<>
really_inline SuperVector<16>::SuperVector(SuperVector const &other)
{
template<>
really_inline SuperVector<16>::SuperVector<int8_t>(int8_t const other)
{
- //u.v128[0] = _mm_set1_epi8(other);
- //u.v128[0] = vec_splat_s8(other);
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<uint8_t>(uint8_t const other)
{
- //u.v128[0] = _mm_set1_epi8(static_cast<int8_t>(other));
- //u.v128[0] = vec_splat_s8(static_cast<int8_t>(other));
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(static_cast<int8_t>(other));
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<int16_t>(int16_t const other)
{
- //u.v128[0] = _mm_set1_epi16(other);
- //u.v128[0] = vec_splat_s16(other);
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<uint16_t>(uint16_t const other)
{
- //u.v128[0] = _mm_set1_epi16(static_cast<int16_t>(other));
- //u.v128[0] = vec_splat_s16(static_cast<int8_t>(other));
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(static_cast<int16_t>(other));
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<int32_t>(int32_t const other)
{
- //u.v128[0] = _mm_set1_epi32(other);
- //u.v128[0] = vec_splat_s32(other);
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<uint32_t>(uint32_t const other)
{
- //u.v128[0] = _mm_set1_epi32(static_cast<int32_t>(other));
- //u.v128[0] = vec_splat_s32(static_cast<int8_t>(other));
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(static_cast<int32_t>(other));
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<int64_t>(int64_t const other)
{
- //u.v128[0] = _mm_set1_epi64x(other);
- //u.v128[0] = vec_splat_u64(other);
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(other);
}
template<>
template<>
really_inline SuperVector<16>::SuperVector<uint64_t>(uint64_t const other)
{
- //u.v128[0] = _mm_set1_epi64x(static_cast<int64_t>(other));
- //u.v128[0] = vec_splat_u32(static_cast<int8_t>(other));
- std::cout<<other<<std::endl;
+ u.v128[0] = (m128) vec_splats(static_cast<int64_t>(other));
}
// Constants
template<>
really_inline SuperVector<16> SuperVector<16>::Ones(void)
{
- //return {_mm_set1_epi8(0xFF)};
- return {(m128) vec_splat_s8(1)};
+ return {(m128) vec_splat_s8(-1)};
}
template<>
really_inline SuperVector<16> SuperVector<16>::Zeroes(void)
{
- //return {_mm_set1_epi8(0)};
-return {(m128) vec_splat_s8(0)};
+ return {(m128) vec_splat_s8(0)};
}
// Methods
template <>
really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const
{
- //return {_mm_and_si128(u.v128[0], b.u.v128[0])};
return {vec_and(u.v128[0], b.u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::operator|(SuperVector<16> const &b) const
{
- //return {_mm_or_si128(u.v128[0], b.u.v128[0])};
return {vec_or(u.v128[0], b.u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::operator^(SuperVector<16> const &b) const
{
- //return {_mm_xor_si128(u.v128[0], b.u.v128[0])};
- return {vec_xor(u.v128[0], b.u.v128[0])};
+ return {(m128) vec_xor(u.v128[0], b.u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::opandnot(SuperVector<16> const &b) const
{
- //return {_mm_andnot_si128(u.v128[0], b.u.v128[0])};
-    m128 and_res = vec_and(u.v128[0], b.u.v128[0]);
-    return vec_xor(and_res,and_res);
+    // _mm_andnot_si128 semantics: (~a) & b; the previous and/xor pair always
+    // produced zero.
+    m128 not_a = (m128) vec_nor(u.v128[0], u.v128[0]);
+    return {vec_and(not_a, b.u.v128[0])};
}
template <>
really_inline SuperVector<16> SuperVector<16>::eq(SuperVector<16> const &b) const
{
- //return {_mm_cmpeq_epi8(u.v128[0], b.u.v128[0])};
- return { vec_all_eq(u.v128[0], b.u.v128[0])};
+ return {(m128) vec_cmpeq((int8x16_t)u.v128[0], (int8x16_t)b.u.v128[0])};
}
template <>
really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask(void)const
-{
- //return _mm_movemask_epi8(u.v128[0]);
- // Compute the mask from the input
- //uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)u.v128[0], 0))));
- //uint64x2_t mask1 = (m128)vextq_s8(mask, Zeroes(), 7);
- //mask = vorrq_u8(mask, mask1);
+{
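+    // Same MSB-gathering scheme as movemask128() in simd_utils.h: shift each
+    // byte's top bit down, then fold pairs of lanes together while widening.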
+ uint8x16_t s1 = vec_sr((uint8x16_t)u.v128[0], vec_splat_u8(7));
+ uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7));
+ uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff));
+ uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and);
- // Get the resulting bytes
- //uint16_t output;
- //vst1q_lane_u16((uint16_t*)&output, (uint16x8_t)mask, 0);
- //return output;
- //#warning FIXME
- return 0;
+ uint32x4_t ss2 = vec_sr((uint32x4_t)s2, vec_splat_u32(14));
+ uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff));
+ uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2);
+
+ uint64x2_t ss3 = vec_sr((uint64x2_t)s3, vec_splats((uint64_t)28)); // splat 28 into both u64 lanes
+ uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff));
+ uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3);
+
+ uint64x2_t ss4 = vec_sld((uint64x2_t) vec_splats(0), s4, 9);
+ uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff));
+ uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4);
+
+ return s5[0];
}
template <>
really_inline typename SuperVector<16>::movemask_type SuperVector<16>::eqmask(SuperVector<16> const b) const
{
- return eq(b).movemask();
+ return eq(b).movemask();
}
template <>
really_inline SuperVector<16> SuperVector<16>::rshift128_var(uint8_t const N) const
-{
- /*
+{
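+    // vec_sld(zero, v, 16 - N) moves byte i+N of v down to byte i and fills
+    // the top N bytes with zeroes: a byte-wise right shift by N.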
switch(N) {
- case 1: return {vec_srl(u.v128[0], Zeroes(), 1)}; break;
- case 2: return {vec_srl(u.v128[0], Zeroes(), 2)}; break;
- case 3: return {vec_srl(u.v128[0], Zeroes(),3)}; break;
- case 4: return {vec_srl(u.v128[0], Zeroes(),4)}; break;
- case 5: return {vec_srl(u.v128[0], Zeroes(),5)}; break;
- case 6: return {vec_srl(u.v128[0], Zeroes(),6)}; break;
- case 7: return {vec_srl(u.v128[0], Zeroes(),7)}; break;
- case 8: return {vec_srl(u.v128[0], Zeroes(),8)}; break;
- case 9: return {vec_srl(u.v128[0], Zeroes(),9)}; break;
- case 10: return {vec_srl(u.v128[0], Zeroes(),10)}; break;
- case 11: return {vec_srl(u.v128[0], Zeroes(),11)}; break;
- case 12: return {vec_srl(u.v128[0], Zeroes(),12)}; break;
- case 13: return {vec_srl(u.v128[0], Zeroes(),13)}; break;
- case 14: return {vec_srl(u.v128[0], Zeroes(),14)}; break;
- case 15: return {vec_srl(u.v128[0], Zeroes(),15)}; break;
+ case 1: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 15)}; break;
+ case 2: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 14)}; break;
+ case 3: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 13)}; break;
+ case 4: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 12)}; break;
+ case 5: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 11)}; break;
+ case 6: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 10)}; break;
+ case 7: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 9)}; break;
+ case 8: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 8)}; break;
+ case 9: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 7)}; break;
+ case 10: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 6)}; break;
+ case 11: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 5)}; break;
+ case 12: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 4)}; break;
+ case 13: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 3)}; break;
+ case 14: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 2)}; break;
+ case 15: return {(m128) vec_sld((int16x8_t) vec_splat_s8(0), (int16x8_t) u.v128[0], 1)}; break;
case 16: return Zeroes(); break;
default: break;
}
return *this;
- */
- std::cout<<N<<std::endl;
- return Zeroes();
}
-#ifdef HS_OPTIMIZE
template <>
really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const
{
- //return {vec_srl(u.v128[0], N)};
- std::cout<<N<<std::endl;
- return Zeroes();
+ return rshift128_var(N);
}
-#else
-template <>
-really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const
-{
- //return rshift128_var(N);
- std::cout<<N<<std::endl;
- return Zeroes();
-}
-#endif
template <>
really_inline SuperVector<16> SuperVector<16>::lshift128_var(uint8_t const N) const
{
- /*
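+    // vec_sld(v, zero, N) moves byte i of v up to byte i+N and fills the low
+    // N bytes with zeroes: a byte-wise left shift by N.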
switch(N) {
- case 1: return {vec_sll(u.v128[0], 1)}; break;
- case 2: return {vec_sll(u.v128[0], 2)}; break;
- case 3: return {vec_sll(u.v128[0], 3)}; break;
- case 4: return {vec_sll(u.v128[0], 4)}; break;
- case 5: return {vec_sll(u.v128[0], 5)}; break;
- case 6: return {vec_sll(u.v128[0], 6)}; break;
- case 7: return {vec_sll(u.v128[0], 7)}; break;
- case 8: return {vec_sll(u.v128[0], 8)}; break;
- case 9: return {vec_sll(u.v128[0], 9)}; break;
- case 10: return {vec_sll(u.v128[0], 10)}; break;
- case 11: return {vec_sll(u.v128[0], 11)}; break;
- case 12: return {vec_sll(u.v128[0], 12)}; break;
- case 13: return {vec_sll(u.v128[0], 13)}; break;
- case 14: return {vec_sll(u.v128[0], 14)}; break;
- case 15: return {vec_sll(u.v128[0], 15)}; break;
+ case 1: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 1)}; break;
+ case 2: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 2)}; break;
+ case 3: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 3)}; break;
+ case 4: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 4)}; break;
+ case 5: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 5)}; break;
+ case 6: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 6)}; break;
+ case 7: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 7)}; break;
+ case 8: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 8)}; break;
+ case 9: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 9)}; break;
+ case 10: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 10)}; break;
+ case 11: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 11)}; break;
+ case 12: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 12)}; break;
+ case 13: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 13)}; break;
+ case 14: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 14)}; break;
+ case 15: return {(m128) vec_sld((int16x8_t) u.v128[0], (int16x8_t) vec_splat_s8(0), 15)}; break;
case 16: return Zeroes(); break;
default: break;
}
return *this;
- */
- std::cout<<N<<std::endl;
- return Zeroes();
}
-#ifdef HS_OPTIMIZE
-template <>
-really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const
-{
- //return {vec_sll(u.v128[0], N)};
- std::cout<<N<<std::endl;
- return Zeroes();
-}
-#else
template <>
really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const
{
- //return lshift128_var(N);
- std::cout<<N<<std::endl;
- return Zeroes();
+ return lshift128_var(N);
}
-#endif
+
template <>
really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr)
{
- //return _mm_loadu_si128((const m128 *)ptr);
- //#warning FIXME
- std::cout<<ptr<<std::endl;
- return Zeroes();
+ return (m128) vec_xl(0, (const int64_t*)ptr);
}
template <>
really_inline SuperVector<16> SuperVector<16>::load(void const *ptr)
{
- //assert(ISALIGNED_N(ptr, alignof(SuperVector::size)));
- //ptr = assume_aligned(ptr, SuperVector::size);
- //return _mm_load_si128((const m128 *)ptr);
- //assert(ISALIGNED_N(ptr, alignof(m128)));
- //return vld1q_s32((const int32_t *)ptr);
- //#warning FIXME
- std::cout<<ptr<<std::endl;
- return Zeroes();
+ assert(ISALIGNED_N(ptr, alignof(SuperVector::size)));
+ return (m128) vec_xl(0, (const int64_t*)ptr);
}
template <>
really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len)
{
- //SuperVector<16> mask = Ones().rshift128_var(16 -len);
- //mask.print8("mask");
- //SuperVector<16> v = vld1q_s32((const int32_t *)ptr);
- //v.print8("v");
- //return mask & v;
- //#warning FIXME
- std::cout<<len<<std::endl;
- std::cout<<ptr<<std::endl;
- return Zeroes();
+ SuperVector<16> mask = Ones().rshift128_var(16 - len);
+ mask.print8("mask");
+ SuperVector<16> v = loadu(ptr);
+ v.print8("v");
+ return mask & v;
}
template<>
really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b)
{
- //return {_mm_shuffle_epi8(u.v128[0], b.u.v128[0])};
- //int8x16_t btranslated = vandq_s8((int8x16_t)b.u.v128[0],vdupq_n_s8(0x8f));
- //return (m128)vqtbl1q_s8((int8x16_t)u.v128[0], (uint8x16_t)btranslated);
- //#warning FIXM
- return eq(b).movemask();
+    // Emulate x86 pshufb as in pshufb_m128: vec_perm ignores bit 0x80 of
+    // each index byte, so lanes whose index has it set are cleared explicitly.
+    uint8x16_t mask = (uint8x16_t) vec_cmpge((uint8x16_t)b.u.v128[0], vec_splats((uint8_t)0x80));
+    uint8x16_t res = vec_perm((uint8x16_t)u.v128[0], (uint8x16_t)u.v128[0], (uint8x16_t)b.u.v128[0]);
+    return {(m128) vec_andc(res, mask)};
}
-#ifdef HS_OPTIMIZE
-template<>
-really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
-{
- //return {vextq_s8(u.v128[0], other.u.v128[0], offset)};
- //#warning FIXME
- std::cout<<offset<<std::endl;
- SuperVector<16> mask = Ones().rshift128_var(16 - 0);
- return mask & pshufb(other);
-}
-#else
template<>
really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
{
- /*
+
switch(offset) {
case 0: return other; break;
- case 1: return {vextq_s8(u.v128[0], other.u.v128[0], 1)}; break;
- case 2: return {vextq_s8(u.v128[0], other.u.v128[0], 2)}; break;
- case 3: return {vextq_s8(u.v128[0], other.u.v128[0], 3)}; break;
- case 4: return {vextq_s8(u.v128[0], other.u.v128[0], 4)}; break;
- case 5: return {vextq_s8(u.v128[0], other.u.v128[0], 5)}; break;
- case 6: return {vextq_s8(u.v128[0], other.u.v128[0], 6)}; break;
- case 7: return {vextq_s8(u.v128[0], other.u.v128[0], 7)}; break;
- case 8: return {vextq_s8(u.v128[0], other.u.v128[0], 8)}; break;
- case 9: return {vextq_s8(u.v128[0], other.u.v128[0], 9)}; break;
- case 10: return {vextq_s8(u.v128[0], other.u.v128[0], 10)}; break;
- case 11: return {vextq_s8(u.v128[0], other.u.v128[0], 11)}; break;
- case 12: return {vextq_s8(u.v128[0], other.u.v128[0], 12)}; break;
- case 13: return {vextq_s8(u.v128[0], other.u.v128[0], 13)}; break;
- case 14: return {vextq_s8(u.v128[0], other.u.v128[0], 14)}; break;
- case 15: return {vextq_s8(u.v128[0], other.u.v128[0], 15)}; break;
+ case 1: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 15)}; break;
+ case 2: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 14)}; break;
+ case 3: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 13)}; break;
+ case 4: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 12)}; break;
+ case 5: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 11)}; break;
+ case 6: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 10)}; break;
+ case 7: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 9)}; break;
+ case 8: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 8)}; break;
+ case 9: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 7)}; break;
+ case 10: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 6)}; break;
+ case 11: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 5)}; break;
+ case 12: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 4)}; break;
+ case 13: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 3)}; break;
+ case 14: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 2)}; break;
+ case 15: return {(m128) vec_sld((int8x16_t) u.v128[0], (int8x16_t) other.u.v128[0], 1)}; break;
default: break;
}
return *this;
- */
- //#warning FIXME
- SuperVector<16> mask = Ones().rshift128_var(16 - 0);
- std::cout<<offset<<std::endl;
- return mask & pshufb(other);
}
-#endif
+
template<>
really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, uint8_t const len)
return mask & pshufb(b);
}
-#ifdef HS_OPTIMIZE
+
template<>
really_inline SuperVector<16> SuperVector<16>::lshift64(uint8_t const N)
{
- //return {vshlq_n_s64(u.v128[0], N)};
- //return {vec_sldw((int64x2_t)u.v128[0], N, 8)};
- std::cout<<N<<std::endl;
- return Zeroes();;
+ uint64x2_t shift_indices = vec_splats((uint64_t)N);
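+    // As in simd_utils.h lshift64_m128: vec_sl takes shift counts modulo 64,
+    // so N is assumed < 64.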
+ return (m128) vec_sl((int64x2_t)u.v128[0], shift_indices);
}
-#else
-template<>
-really_inline SuperVector<16> SuperVector<16>::lshift64(uint8_t const N)
-{
- /*
- switch(N) {
- case 0: return *this; break;
- case 1: return {vec_sldw((int64x2_t)u.v128[0], 1, 8)}; break;
- case 2: return {vec_sldw((int64x2_t)u.v128[0], 2, 8)}; break;
- case 3: return {vec_sldw((int64x2_t)u.v128[0], 3, 8)}; break;
- case 4: return {vec_sldw((int64x2_t)u.v128[0], 4, 8)}; break;
- case 5: return {vec_sldw((int64x2_t)u.v128[0], 5, 8)}; break;
- case 6: return {vec_sldw((int64x2_t)u.v128[0], 6, 8)}; break;
- case 7: return {vec_sldw((int64x2_t)u.v128[0], 7, 8)}; break;
- case 8: return {vec_sldw((int64x2_t)u.v128[0], 8, 8)}; break;
- case 9: return {vec_sldw((int64x2_t)u.v128[0], 9, 8)}; break;
- case 10: return {vec_sldw((int64x2_t)u.v128[0], 10, 8)}; break;
- case 11: return {vec_sldw((int64x2_t)u.v128[0], 11, 8)}; break;
- case 12: return {vec_sldw((int64x2_t)u.v128[0], 12, 8)}; break;
- case 13: return {vec_sldw((int64x2_t)u.v128[0], 13, 8)}; break;
- case 14: return {vec_sldw((int64x2_t)u.v128[0], 14, 8)}; break;
- case 15: return {vec_sldw((int64x2_t)u.v128[0], 15, 8)}; break;
- case 16: return Zeroes();
- default: break;
- }
- return *this;
- */
- std::cout<<N<<std::endl;
- return Zeroes();
-}
-#endif
+
-#ifdef HS_OPTIMIZE
template<>
really_inline SuperVector<16> SuperVector<16>::rshift64(uint8_t const N)
{
- //return {vshrq_n_s64(u.v128[0], N)};
- //#warning FIXME
- std::cout<<N<<std::endl;
- return Zeroes();
+ uint64x2_t shift_indices = vec_splats((uint64_t)N);
+ return (m128) vec_sr((int64x2_t)u.v128[0], shift_indices);
}
-#else
-template<>
-really_inline SuperVector<16> SuperVector<16>::rshift64(uint8_t const N)
-{
- /*
- switch(N) {
- case 0: return {vshrq_n_s64(u.v128[0], 0)}; break;
- case 1: return {vshrq_n_s64(u.v128[0], 1)}; break;
- case 2: return {vshrq_n_s64(u.v128[0], 2)}; break;
- case 3: return {vshrq_n_s64(u.v128[0], 3)}; break;
- case 4: return {vshrq_n_s64(u.v128[0], 4)}; break;
- case 5: return {vshrq_n_s64(u.v128[0], 5)}; break;
- case 6: return {vshrq_n_s64(u.v128[0], 6)}; break;
- case 7: return {vshrq_n_s64(u.v128[0], 7)}; break;
- case 8: return {vshrq_n_s64(u.v128[0], 8)}; break;
- case 9: return {vshrq_n_s64(u.v128[0], 9)}; break;
- case 10: return {vshrq_n_s64(u.v128[0], 10)}; break;
- case 11: return {vshrq_n_s64(u.v128[0], 11)}; break;
- case 12: return {vshrq_n_s64(u.v128[0], 12)}; break;
- case 13: return {vshrq_n_s64(u.v128[0], 13)}; break;
- case 14: return {vshrq_n_s64(u.v128[0], 14)}; break;
- case 15: return {vshrq_n_s64(u.v128[0], 15)}; break;
- case 16: return Zeroes();
- default: break;
- }
- return *this;
- */
- //#warning FIXME
- std::cout<<N<<std::endl;
- return Zeroes();
-}
-#endif
template<>
really_inline SuperVector<16> SuperVector<16>::lshift128(uint8_t const N)